diff --git a/.gitignore b/.gitignore index d325d3bd49d..3db5f73ddd2 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ package-lock.json *.tern-port */**/.tern-port .DS_Store +.vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 7a73a41bfdf..00000000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} \ No newline at end of file diff --git a/README.md b/README.md index 5756258b737..8b877993dbd 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ Windows License ======= -Copyright (c) 2014-2019 [Rancher Labs, Inc.](http://rancher.com) +Copyright (c) 2014-2019 [Rancher Labs, Inc.](https://rancher.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/static/img/os/Rancher_aws1.png b/assets/img/os/Rancher_aws1.png similarity index 100% rename from static/img/os/Rancher_aws1.png rename to assets/img/os/Rancher_aws1.png diff --git a/static/img/os/Rancher_aws2.png b/assets/img/os/Rancher_aws2.png similarity index 100% rename from static/img/os/Rancher_aws2.png rename to assets/img/os/Rancher_aws2.png diff --git a/static/img/os/Rancher_aws3.png b/assets/img/os/Rancher_aws3.png similarity index 100% rename from static/img/os/Rancher_aws3.png rename to assets/img/os/Rancher_aws3.png diff --git a/static/img/os/Rancher_aws4.png b/assets/img/os/Rancher_aws4.png similarity index 100% rename from static/img/os/Rancher_aws4.png rename to assets/img/os/Rancher_aws4.png diff --git a/static/img/os/Rancher_aws5.png b/assets/img/os/Rancher_aws5.png similarity index 100% rename from static/img/os/Rancher_aws5.png rename to assets/img/os/Rancher_aws5.png diff --git a/static/img/os/Rancher_aws6.png b/assets/img/os/Rancher_aws6.png similarity index 100% rename from static/img/os/Rancher_aws6.png rename to assets/img/os/Rancher_aws6.png diff --git a/static/img/os/Rancher_busydash.png b/assets/img/os/Rancher_busydash.png similarity index 100% rename from static/img/os/Rancher_busydash.png rename to assets/img/os/Rancher_busydash.png diff --git a/static/img/os/rancheroshowitworks.png b/assets/img/os/rancheroshowitworks.png similarity index 100% rename from static/img/os/rancheroshowitworks.png rename to assets/img/os/rancheroshowitworks.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-1.png b/assets/img/rancher/adfs/adfs-add-rpt-1.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-1.png rename to assets/img/rancher/adfs/adfs-add-rpt-1.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-10.png b/assets/img/rancher/adfs/adfs-add-rpt-10.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-10.png rename to assets/img/rancher/adfs/adfs-add-rpt-10.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-11.png b/assets/img/rancher/adfs/adfs-add-rpt-11.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-11.png rename to assets/img/rancher/adfs/adfs-add-rpt-11.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-2.png b/assets/img/rancher/adfs/adfs-add-rpt-2.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-2.png rename to assets/img/rancher/adfs/adfs-add-rpt-2.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-3.png b/assets/img/rancher/adfs/adfs-add-rpt-3.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-3.png rename to assets/img/rancher/adfs/adfs-add-rpt-3.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-4.png 
b/assets/img/rancher/adfs/adfs-add-rpt-4.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-4.png rename to assets/img/rancher/adfs/adfs-add-rpt-4.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-5.png b/assets/img/rancher/adfs/adfs-add-rpt-5.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-5.png rename to assets/img/rancher/adfs/adfs-add-rpt-5.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-6.png b/assets/img/rancher/adfs/adfs-add-rpt-6.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-6.png rename to assets/img/rancher/adfs/adfs-add-rpt-6.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-7.png b/assets/img/rancher/adfs/adfs-add-rpt-7.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-7.png rename to assets/img/rancher/adfs/adfs-add-rpt-7.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-8.png b/assets/img/rancher/adfs/adfs-add-rpt-8.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-8.png rename to assets/img/rancher/adfs/adfs-add-rpt-8.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-9.png b/assets/img/rancher/adfs/adfs-add-rpt-9.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-9.png rename to assets/img/rancher/adfs/adfs-add-rpt-9.png diff --git a/static/img/rancher/adfs/adfs-add-tcr-1.png b/assets/img/rancher/adfs/adfs-add-tcr-1.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-tcr-1.png rename to assets/img/rancher/adfs/adfs-add-tcr-1.png diff --git a/static/img/rancher/adfs/adfs-add-tcr-2.png b/assets/img/rancher/adfs/adfs-add-tcr-2.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-tcr-2.png rename to assets/img/rancher/adfs/adfs-add-tcr-2.png diff --git a/static/img/rancher/adfs/adfs-edit-cr.png b/assets/img/rancher/adfs/adfs-edit-cr.png similarity index 100% rename from static/img/rancher/adfs/adfs-edit-cr.png rename to assets/img/rancher/adfs/adfs-edit-cr.png diff --git a/static/img/rancher/adfs/adfs-overview.png b/assets/img/rancher/adfs/adfs-overview.png similarity index 100% rename from static/img/rancher/adfs/adfs-overview.png rename to assets/img/rancher/adfs/adfs-overview.png diff --git a/static/img/rancher/airgap/edit-system-default-registry.png b/assets/img/rancher/airgap/edit-system-default-registry.png similarity index 100% rename from static/img/rancher/airgap/edit-system-default-registry.png rename to assets/img/rancher/airgap/edit-system-default-registry.png diff --git a/static/img/rancher/airgap/enter-system-default-registry.png b/assets/img/rancher/airgap/enter-system-default-registry.png similarity index 100% rename from static/img/rancher/airgap/enter-system-default-registry.png rename to assets/img/rancher/airgap/enter-system-default-registry.png diff --git a/static/img/rancher/airgap/privateregistry.svg b/assets/img/rancher/airgap/privateregistry.svg similarity index 100% rename from static/img/rancher/airgap/privateregistry.svg rename to assets/img/rancher/airgap/privateregistry.svg diff --git a/static/img/rancher/airgap/privateregistrypushpull.svg b/assets/img/rancher/airgap/privateregistrypushpull.svg similarity index 100% rename from static/img/rancher/airgap/privateregistrypushpull.svg rename to assets/img/rancher/airgap/privateregistrypushpull.svg diff --git a/static/img/rancher/airgap/settings.png b/assets/img/rancher/airgap/settings.png similarity index 100% rename from static/img/rancher/airgap/settings.png rename to 
assets/img/rancher/airgap/settings.png diff --git a/static/img/rancher/airgap/system-charts-setting.png b/assets/img/rancher/airgap/system-charts-setting.png similarity index 100% rename from static/img/rancher/airgap/system-charts-setting.png rename to assets/img/rancher/airgap/system-charts-setting.png diff --git a/static/img/rancher/airgap/system-charts-update.png b/assets/img/rancher/airgap/system-charts-update.png similarity index 100% rename from static/img/rancher/airgap/system-charts-update.png rename to assets/img/rancher/airgap/system-charts-update.png diff --git a/static/img/rancher/bpg/hub-and-spoke.png b/assets/img/rancher/bpg/hub-and-spoke.png similarity index 100% rename from static/img/rancher/bpg/hub-and-spoke.png rename to assets/img/rancher/bpg/hub-and-spoke.png diff --git a/static/img/rancher/bpg/regional.png b/assets/img/rancher/bpg/regional.png similarity index 100% rename from static/img/rancher/bpg/regional.png rename to assets/img/rancher/bpg/regional.png diff --git a/static/img/rancher/bulk-key-values.gif b/assets/img/rancher/bulk-key-values.gif similarity index 100% rename from static/img/rancher/bulk-key-values.gif rename to assets/img/rancher/bulk-key-values.gif diff --git a/static/img/rancher/canal-diagram.png b/assets/img/rancher/canal-diagram.png similarity index 100% rename from static/img/rancher/canal-diagram.png rename to assets/img/rancher/canal-diagram.png diff --git a/static/img/rancher/globalpermissionrole.png b/assets/img/rancher/globalpermissionrole.png similarity index 100% rename from static/img/rancher/globalpermissionrole.png rename to assets/img/rancher/globalpermissionrole.png diff --git a/static/img/rancher/globalpermissionuser.png b/assets/img/rancher/globalpermissionuser.png similarity index 100% rename from static/img/rancher/globalpermissionuser.png rename to assets/img/rancher/globalpermissionuser.png diff --git a/static/img/rancher/ha/nlb/add-targets-targetgroup-443.png b/assets/img/rancher/ha/nlb/add-targets-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/add-targets-targetgroup-443.png rename to assets/img/rancher/ha/nlb/add-targets-targetgroup-443.png diff --git a/static/img/rancher/ha/nlb/added-targets-targetgroup-443.png b/assets/img/rancher/ha/nlb/added-targets-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/added-targets-targetgroup-443.png rename to assets/img/rancher/ha/nlb/added-targets-targetgroup-443.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-443-advanced.png b/assets/img/rancher/ha/nlb/create-targetgroup-443-advanced.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-443-advanced.png rename to assets/img/rancher/ha/nlb/create-targetgroup-443-advanced.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-443.png b/assets/img/rancher/ha/nlb/create-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-443.png rename to assets/img/rancher/ha/nlb/create-targetgroup-443.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-80-advanced.png b/assets/img/rancher/ha/nlb/create-targetgroup-80-advanced.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-80-advanced.png rename to assets/img/rancher/ha/nlb/create-targetgroup-80-advanced.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-80.png b/assets/img/rancher/ha/nlb/create-targetgroup-80.png similarity index 100% rename from 
static/img/rancher/ha/nlb/create-targetgroup-80.png rename to assets/img/rancher/ha/nlb/create-targetgroup-80.png diff --git a/static/img/rancher/ha/nlb/ec2-loadbalancing.png b/assets/img/rancher/ha/nlb/ec2-loadbalancing.png similarity index 100% rename from static/img/rancher/ha/nlb/ec2-loadbalancing.png rename to assets/img/rancher/ha/nlb/ec2-loadbalancing.png diff --git a/static/img/rancher/ha/nlb/edit-targetgroup-443.png b/assets/img/rancher/ha/nlb/edit-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/edit-targetgroup-443.png rename to assets/img/rancher/ha/nlb/edit-targetgroup-443.png diff --git a/static/img/rancher/ldapsearch-group.png b/assets/img/rancher/ldapsearch-group.png similarity index 100% rename from static/img/rancher/ldapsearch-group.png rename to assets/img/rancher/ldapsearch-group.png diff --git a/static/img/rancher/ldapsearch-user.png b/assets/img/rancher/ldapsearch-user.png similarity index 100% rename from static/img/rancher/ldapsearch-user.png rename to assets/img/rancher/ldapsearch-user.png diff --git a/assets/img/rancher/rancher_overview.png b/assets/img/rancher/rancher_overview.png new file mode 100644 index 00000000000..c445fec3710 Binary files /dev/null and b/assets/img/rancher/rancher_overview.png differ diff --git a/assets/img/rancher/rancher_overview_2.png b/assets/img/rancher/rancher_overview_2.png new file mode 100644 index 00000000000..00ce8eb2c27 Binary files /dev/null and b/assets/img/rancher/rancher_overview_2.png differ diff --git a/static/img/rancher/rancherroles1.png b/assets/img/rancher/rancherroles1.png similarity index 100% rename from static/img/rancher/rancherroles1.png rename to assets/img/rancher/rancherroles1.png diff --git a/static/img/rancher/rancheruser.png b/assets/img/rancher/rancheruser.png similarity index 100% rename from static/img/rancher/rancheruser.png rename to assets/img/rancher/rancheruser.png diff --git a/static/img/rancher/set-hostport.gif b/assets/img/rancher/set-hostport.gif similarity index 100% rename from static/img/rancher/set-hostport.gif rename to assets/img/rancher/set-hostport.gif diff --git a/static/img/rancher/set-nodeport.gif b/assets/img/rancher/set-nodeport.gif similarity index 100% rename from static/img/rancher/set-nodeport.gif rename to assets/img/rancher/set-nodeport.gif diff --git a/static/img/rancher/vsphere-cluster-create-1.png b/assets/img/rancher/vsphere-cluster-create-1.png similarity index 100% rename from static/img/rancher/vsphere-cluster-create-1.png rename to assets/img/rancher/vsphere-cluster-create-1.png diff --git a/static/img/rancher/vsphere-node-driver-cloudprovider.png b/assets/img/rancher/vsphere-node-driver-cloudprovider.png similarity index 100% rename from static/img/rancher/vsphere-node-driver-cloudprovider.png rename to assets/img/rancher/vsphere-node-driver-cloudprovider.png diff --git a/static/img/rancher/vsphere-node-template-1.png b/assets/img/rancher/vsphere-node-template-1.png similarity index 100% rename from static/img/rancher/vsphere-node-template-1.png rename to assets/img/rancher/vsphere-node-template-1.png diff --git a/static/img/rancher/vsphere-node-template-2.png b/assets/img/rancher/vsphere-node-template-2.png similarity index 100% rename from static/img/rancher/vsphere-node-template-2.png rename to assets/img/rancher/vsphere-node-template-2.png diff --git a/static/img/rancher/vsphere-storage-class.png b/assets/img/rancher/vsphere-storage-class.png similarity index 100% rename from static/img/rancher/vsphere-storage-class.png rename to 
assets/img/rancher/vsphere-storage-class.png diff --git a/static/img/rancher/workload-add-volume.png b/assets/img/rancher/workload-add-volume.png similarity index 100% rename from static/img/rancher/workload-add-volume.png rename to assets/img/rancher/workload-add-volume.png diff --git a/static/img/rke/rke-etcd-backup.png b/assets/img/rke/rke-etcd-backup.png similarity index 100% rename from static/img/rke/rke-etcd-backup.png rename to assets/img/rke/rke-etcd-backup.png diff --git a/static/img/rke/vsphere-advanced-parameters.png b/assets/img/rke/vsphere-advanced-parameters.png similarity index 100% rename from static/img/rke/vsphere-advanced-parameters.png rename to assets/img/rke/vsphere-advanced-parameters.png diff --git a/static/img/rke/vsphere-nodedriver-enable-uuid.png b/assets/img/rke/vsphere-nodedriver-enable-uuid.png similarity index 100% rename from static/img/rke/vsphere-nodedriver-enable-uuid.png rename to assets/img/rke/vsphere-nodedriver-enable-uuid.png diff --git a/assets/sass/app.scss b/assets/sass/app.scss index 127d70636ff..679a9be83c8 100644 --- a/assets/sass/app.scss +++ b/assets/sass/app.scss @@ -13,3 +13,7 @@ display: none; visibility: hidden; } + +pre > code { + padding: 0; +} \ No newline at end of file diff --git a/config.toml b/config.toml index 67d8a56f732..4cdea8ac743 100644 --- a/config.toml +++ b/config.toml @@ -5,6 +5,7 @@ title = "Rancher Labs" theme = "rancher-website-theme" themesDir = "node_modules" pluralizeListTitles = false +timeout = 30000 enableRobotsTXT = true pygmentsCodeFences = true diff --git a/content/k3s/latest/en/_index.md b/content/k3s/latest/en/_index.md index 6035751057e..58cb7baa2e8 100644 --- a/content/k3s/latest/en/_index.md +++ b/content/k3s/latest/en/_index.md @@ -1,7 +1,6 @@ --- -title: "K3S - 5 less than k8s" -shortTitle: K3S -date: 2019-02-05T09:52:46-07:00 +title: "K3s - 5 less than K8s" +shortTitle: K3s name: "menu" --- @@ -15,21 +14,16 @@ Great for: * ARM * Situations where a PhD in k8s clusterology is infeasible -What is this? ---- +# What is K3s? -k3s is intended to be a fully compliant Kubernetes distribution with the following changes: +K3s is a fully compliant Kubernetes distribution with the following enhancements: -1. Legacy, alpha, non-default features are removed. Hopefully, you shouldn't notice the - stuff that has been removed. -2. Removed most in-tree plugins (cloud providers and storage plugins) which can be replaced - with out of tree addons. -3. Add sqlite3 as the default storage mechanism. etcd3 is still available, but not the default. -4. Wrapped in simple launcher that handles a lot of the complexity of TLS and options. -5. Minimal to no OS dependencies (just a sane kernel and cgroup mounts needed). k3s packages required - dependencies +* An embedded SQLite database has replaced etcd as the default datastore. External datastores such as PostgreSQL, MySQL, and etcd are also supported. +* Simple but powerful "batteries-included" features have been added, such as: a local storage provider, a service load balancer, a Helm controller, and the Traefik ingress controller. +* Operation of all Kubernetes control plane components is encapsulated in a single binary and process. This allows K3s to automate and manage complex cluster operations like distributing certificates. +* In-tree cloud providers and storage plugins have been removed. +* External dependencies have been minimized (just a modern kernel and cgroup mounts needed). 
K3s packages the required dependencies, including: * containerd * Flannel * CoreDNS - * CNI * Host utilities (iptables, socat, etc.) diff --git a/content/k3s/latest/en/advanced/_index.md b/content/k3s/latest/en/advanced/_index.md new file mode 100644 index 00000000000..a7b3a4262e0 --- /dev/null +++ b/content/k3s/latest/en/advanced/_index.md @@ -0,0 +1,164 @@ +--- +title: "Advanced Options and Configuration" +weight: 45 +aliases: + - /k3s/latest/en/running/ + - /k3s/latest/en/configuration/ +--- + +This section contains advanced information describing the different ways you can run and manage K3s: + +- [Auto-deploying manifests](#auto-deploying-manifests) +- [Using Docker as the container runtime](#using-docker-as-the-container-runtime) +- [Running K3s with RootlessKit (Experimental)](#running-k3s-with-rootlesskit-experimental) +- [Node labels and taints](#node-labels-and-taints) +- [Starting the server with the installation script](#starting-the-server-with-the-installation-script) +- [Additional preparation for Alpine Linux setup](#additional-preparation-for-alpine-linux-setup) +- [Running K3d (K3s in Docker) and docker-compose](#running-k3d-k3s-in-docker-and-docker-compose) + +# Auto-Deploying Manifests + +Any file found in `/var/lib/rancher/k3s/server/manifests` will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. + +For information about deploying Helm charts, refer to the section about [Helm.](../helm) + +# Using Docker as the Container Runtime + +K3s includes and defaults to [containerd,](https://containerd.io/) an industry-standard container runtime. If you want to use Docker instead of containerd, then you simply need to run the agent with the `--docker` flag. + +K3s will generate a config.toml for containerd in `/var/lib/rancher/k3s/agent/etc/containerd/config.toml`. For advanced customization of this file, you can create another file called `config.toml.tmpl` in the same directory, and it will be used instead. + +The `config.toml.tmpl` file is treated as a Go template, and the `config.Node` structure is passed to the template. For an example of how to use the structure to customize the configuration file, see https://github.com/rancher/k3s/blob/master/pkg/agent/templates/templates.go#L16-L32 + +# Running K3s with RootlessKit (Experimental) + +> **Warning:** This feature is experimental. + +RootlessKit is a Linux-native "fake root" utility, made mainly for [running Docker and Kubernetes as an unprivileged user,](https://github.com/rootless-containers/usernetes) so as to protect the real root on the host from potential container-breakout attacks. + +Initial rootless support has been added, but there are a number of significant usability issues surrounding it. + +We are releasing this initial support for those interested in rootless mode, in the hope that the community can help improve its usability. First, ensure you have a proper setup and support for user namespaces. Refer to the [requirements section](https://github.com/rootless-containers/rootlesskit#setup) in RootlessKit for instructions. +In short, the latest Ubuntu release is your best bet for this to work. + +### Known Issues with RootlessKit + +* **Ports** + + When running rootless, a new network namespace is created. This means that the K3s instance runs with its networking fairly detached from the host. The only way to access services run in K3s from the host is to set up port forwards to the K3s network namespace.
We have a controller that will automatically bind 6443 and service ports below 1024 to the host with an offset of 10000. + + That means service port 80 will become 10080 on the host, but 8080 will become 8080 without any offset. + + Currently, only `LoadBalancer` services are automatically bound. + +* **Daemon lifecycle** + + Once you kill K3s and then start a new instance of K3s, it will create a new network namespace, but it doesn't kill the old pods. So you are left + with a fairly broken setup. This is the main issue at the moment: how to deal with the network namespace. + + The issue is tracked in https://github.com/rootless-containers/rootlesskit/issues/65 + +* **Cgroups** + + Cgroups are not supported. + +### Running Servers and Agents with Rootless + +Just add the `--rootless` flag to either server or agent. So run `k3s server --rootless` and then look for the message `Wrote kubeconfig [SOME PATH]` to find the location of your kubeconfig file. + +For more information about setting up the kubeconfig file, refer to the [section about cluster access.](../cluster-access) + +> Be careful: if you use `-o` to write the kubeconfig to a different directory, it will probably not work. This is because the K3s instance is running in a different mount namespace. + +# Node Labels and Taints + +K3s agents can be configured with the options `--node-label` and `--node-taint`, which add a label and taint to the kubelet. The two options only add labels and/or taints [at registration time,]({{< baseurl >}}/k3s/latest/en/installation/install-options/#node-labels-and-taints-for-agents) so they can only be added once and cannot be changed after that by running K3s commands again. + +If you want to change node labels and taints after node registration, you should use `kubectl`. Refer to the official Kubernetes documentation for details on how to add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) and [node labels.](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node) + +# Starting the Server with the Installation Script + +The installation script will auto-detect if your OS is using systemd or openrc and start the service. +When running with openrc, logs will be created at `/var/log/k3s.log`. + +When running with systemd, logs will be created in `/var/log/syslog` and can be viewed using `journalctl -u k3s`.
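+
+For example (a minimal sketch, assuming the service was installed under the default `k3s` name), the service and its logs can be checked on either init system with commands like these:
+
+```bash
+# systemd: check the unit and follow its logs
+systemctl status k3s
+journalctl -u k3s -f
+
+# openrc: check the service and follow the log file
+rc-service k3s status
+tail -f /var/log/k3s.log
+```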
+ +An example of installing and auto-starting with the install script: + +```bash +curl -sfL https://get.k3s.io | sh - +``` + +When running the server manually, you should get output similar to the following: + +``` +$ k3s server +INFO[2019-01-22T15:16:19.908493986-07:00] Starting k3s dev +INFO[2019-01-22T15:16:19.908934479-07:00] Running kube-apiserver --allow-privileged=true --authorization-mode Node,RBAC --service-account-signing-key-file /var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range 10.43.0.0/16 --advertise-port 6445 --advertise-address 127.0.0.1 --insecure-port 0 --secure-port 6444 --bind-address 127.0.0.1 --tls-cert-file /var/lib/rancher/k3s/server/tls/localhost.crt --tls-private-key-file /var/lib/rancher/k3s/server/tls/localhost.key --service-account-key-file /var/lib/rancher/k3s/server/tls/service.key --service-account-issuer k3s --api-audiences unknown --basic-auth-file /var/lib/rancher/k3s/server/cred/passwd --kubelet-client-certificate /var/lib/rancher/k3s/server/tls/token-node.crt --kubelet-client-key /var/lib/rancher/k3s/server/tls/token-node.key +Flag --insecure-port has been deprecated, This flag will be removed in a future version. +INFO[2019-01-22T15:16:20.196766005-07:00] Running kube-scheduler --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --port 0 --secure-port 0 --leader-elect=false +INFO[2019-01-22T15:16:20.196880841-07:00] Running kube-controller-manager --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --service-account-private-key-file /var/lib/rancher/k3s/server/tls/service.key --allocate-node-cidrs --cluster-cidr 10.42.0.0/16 --root-ca-file /var/lib/rancher/k3s/server/tls/token-ca.crt --port 0 --secure-port 0 --leader-elect=false +Flag --port has been deprecated, see --secure-port instead. +INFO[2019-01-22T15:16:20.273441984-07:00] Listening on :6443 +INFO[2019-01-22T15:16:20.278383446-07:00] Writing manifest: /var/lib/rancher/k3s/server/manifests/coredns.yaml +INFO[2019-01-22T15:16:20.474454524-07:00] Node token is available at /var/lib/rancher/k3s/server/node-token +INFO[2019-01-22T15:16:20.474471391-07:00] To join node to cluster: k3s agent -s https://10.20.0.3:6443 -t ${NODE_TOKEN} +INFO[2019-01-22T15:16:20.541027133-07:00] Wrote kubeconfig /etc/rancher/k3s/k3s.yaml +INFO[2019-01-22T15:16:20.541049100-07:00] Run: k3s kubectl +``` + +The output will likely be much longer as the agent will create a lot of logs. By default, the server +will register itself as a node (run the agent). + +# Additional Preparation for Alpine Linux Setup + +In order to set up Alpine Linux, you have to go through the following preparation: + +Update **/etc/update-extlinux.conf** by adding: + +``` +default_kernel_opts="... cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory" +``` + +Then update the config and reboot: + +```bash +update-extlinux +reboot +``` + +# Running K3d (K3s in Docker) and docker-compose + +[k3d](https://github.com/rancher/k3d) is a utility designed to easily run K3s in Docker. + +It can be installed via the [brew](https://brew.sh/) utility on macOS: + +``` +brew install k3d +``` + +`rancher/k3s` images are also available to run the K3s server and agent from Docker. + +A `docker-compose.yml` is in the root of the K3s repo that serves as an example of how to run K3s from Docker.
To run K3s from `docker-compose` in this repo, run: + + docker-compose up --scale agent=3 + # kubeconfig is written to current dir + + kubectl --kubeconfig kubeconfig.yaml get node + + NAME STATUS ROLES AGE VERSION + 497278a2d6a2 Ready 11s v1.13.2-k3s2 + d54c8b17c055 Ready 11s v1.13.2-k3s2 + db7a5a5a5bdd Ready 12s v1.13.2-k3s2 + +To run the agent only in Docker, use `docker-compose up agent`. + +Alternatively, the `docker run` command can be used: + + sudo docker run \ + -d --tmpfs /run \ + --tmpfs /var/run \ + -e K3S_URL=${SERVER_URL} \ + -e K3S_TOKEN=${NODE_TOKEN} \ + --privileged rancher/k3s:vX.Y.Z + diff --git a/content/k3s/latest/en/architecture/_index.md b/content/k3s/latest/en/architecture/_index.md new file mode 100644 index 00000000000..0b04ddbfd8c --- /dev/null +++ b/content/k3s/latest/en/architecture/_index.md @@ -0,0 +1,54 @@ +--- +title: Architecture +weight: 1 +--- + +This page describes the architecture of a high-availability K3s server cluster and how it differs from a single-node server cluster. + +It also describes how agent nodes are registered with K3s servers. + +A server node is defined as a machine (bare-metal or virtual) running the `k3s server` command. A worker node is defined as a machine running the `k3s agent` command. + +This page covers the following topics: + +- [Single-server setup with an embedded database](#single-server-setup-with-an-embedded-db) +- [High-availability K3s server with an external database](#high-availability-k3s-server-with-an-external-db) + - [Fixed registration address for agent nodes](#fixed-registration-address-for-agent-nodes) +- [How agent node registration works](#how-agent-node-registration-works) + +# Single-server Setup with an Embedded DB + +The following diagram shows an example of a cluster that has a single-node K3s server with an embedded SQLite database. + +In this configuration, each agent node is registered to the same server node. A K3s user can manipulate Kubernetes resources by calling the K3s API on the server node. + +K3s Architecture with a Single Server
+![Architecture]({{< baseurl >}}/img/rancher/k3s-architecture-single-server.png) + +# High-Availability K3s Server with an External DB + +Single server clusters can meet a variety of use cases, but for environments where uptime of the Kubernetes control plane is critical, you can run K3s in an HA configuration. An HA K3s cluster is comprised of: + +* Two or more **server nodes** that will serve the Kubernetes API and run other control plane services + +* An **external datastore** (as opposed to the embedded SQLite datastore used in single-server setups) + +K3s Architecture with a High-availability Server
+![Architecture]({{< baseurl >}}/img/rancher/k3s-architecture-ha-server.png) + +### Fixed Registration Address for Agent Nodes + +In the high-availability server configuration, each node must also register with the Kubernetes API by using a fixed registration address, as shown in the diagram below. + +After registration, the agent nodes establish a connection directly to one of the server nodes. + +![k3s HA]({{< baseurl >}}/img/k3s/k3s-production-setup.svg) + +# How Agent Node Registration Works + +Agent nodes are registered with a websocket connection initiated by the `k3s agent` process, and the connection is maintained by a client-side load balancer running as part of the agent process. + +Agents will register with the server using the node cluster secret along with a randomly generated password for the node, stored at `/etc/rancher/node/password`. The server will store the passwords for individual nodes at `/var/lib/rancher/k3s/server/cred/node-passwd`, and any subsequent attempts must use the same password. + +If the `/etc/rancher/node` directory of an agent is removed, the password file should be recreated for the agent, or the entry removed from the server. + +A unique node ID can be appended to the hostname by launching K3s servers or agents using the `--with-node-id` flag. \ No newline at end of file diff --git a/content/k3s/latest/en/building/_index.md b/content/k3s/latest/en/building/_index.md deleted file mode 100644 index da075e15a46..00000000000 --- a/content/k3s/latest/en/building/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: "Building from Source" -weight: 10 ---- - -This section provides information on building k3s from source. - -See the [release](https://github.com/rancher/k3s/releases/latest) page for pre-built releases. - -The clone will be much faster on this repo if you do - - git clone --depth 1 https://github.com/rancher/k3s.git - -This repo includes all of Kubernetes history so `--depth 1` will avoid most of that. - -To build the full release binary run `make` and that will create `./dist/artifacts/k3s`. -Optionally to build the binaries without running linting or building docker images: -```sh -./scripts/build && ./scripts/package-cli -``` - -For development, you just need go 1.12 and a sane GOPATH. To compile the binaries run: -```bash -go build -o k3s -go build -o kubectl ./cmd/kubectl -go build -o hyperkube ./vendor/k8s.io/kubernetes/cmd/hyperkube -``` - -This will create the main executable, but it does not include the dependencies like containerd, CNI, -etc. To run a server and agent with all the dependencies for development run the following -helper scripts: -```bash -# Server -./scripts/dev-server.sh - -# Agent -./scripts/dev-agent.sh -``` - - -Kubernetes Source ------------------ - -The source code for Kubernetes is in `vendor/` and the location from which that is copied -is in `./vendor.conf`. Go to the referenced repo/tag and you'll find all the patches applied -to upstream Kubernetes. diff --git a/content/k3s/latest/en/cluster-access/_index.md b/content/k3s/latest/en/cluster-access/_index.md new file mode 100644 index 00000000000..0b07d08b666 --- /dev/null +++ b/content/k3s/latest/en/cluster-access/_index.md @@ -0,0 +1,25 @@ +--- +title: Cluster Access +weight: 21 +--- + +The kubeconfig file is used to configure access to the Kubernetes cluster. It is required to be set up properly in order to access the Kubernetes API such as with kubectl or for installing applications with Helm. 
You may set the kubeconfig by either exporting the KUBECONFIG environment variable or by specifying a flag for kubectl and helm. Refer to the examples below for details. + +Leverage the KUBECONFIG environment variable: + +``` +export KUBECONFIG=/etc/rancher/k3s/k3s.yaml +kubectl get pods --all-namespaces +helm ls --all-namespaces +``` + +Or specify the location of the kubeconfig file per command: + +``` +kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml get pods --all-namespaces +helm --kubeconfig /etc/rancher/k3s/k3s.yaml ls --all-namespaces +``` + +### Accessing the Cluster from Outside with kubectl + +Copy `/etc/rancher/k3s/k3s.yaml` on your machine located outside the cluster as `~/.kube/config`. Then replace "localhost" with the IP or name of your K3s server. `kubectl` can now manage your K3s cluster. \ No newline at end of file diff --git a/content/k3s/latest/en/configuration/_index.md b/content/k3s/latest/en/configuration/_index.md deleted file mode 100644 index ad1a462c1c8..00000000000 --- a/content/k3s/latest/en/configuration/_index.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -title: "Configuration Info" -weight: 4 ---- - -This section contains information on using k3s with various configurations. - - -Auto-Deploying Manifests ------------------------- - -Any file found in `/var/lib/rancher/k3s/server/manifests` will automatically be deployed to -Kubernetes in a manner similar to `kubectl apply`. - -It is also possible to deploy Helm charts. k3s supports a CRD controller for installing charts. A YAML file specification can look as following (example taken from `/var/lib/rancher/k3s/server/manifests/traefik.yaml`): - -```yaml -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: traefik - namespace: kube-system -spec: - chart: stable/traefik - set: - rbac.enabled: "true" - ssl.enabled: "true" -``` - -Keep in mind that `namespace` in your HelmChart resource metadata section should always be `kube-system`, because k3s deploy controller is configured to watch this namespace for new HelmChart resources. If you want to specify the namespace for the actual helm release, you can do that using `targetNamespace` key in the spec section: - -``` -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: grafana - namespace: kube-system -spec: - chart: stable/grafana - targetNamespace: monitoring - set: - adminPassword: "NotVerySafePassword" - valuesContent: |- - image: - tag: master - env: - GF_EXPLORE_ENABLED: true - adminUser: admin - sidecar: - datasources: - enabled: true -``` - -Also note that besides `set` you can use `valuesContent` in the spec section. And it's okay to use both of them. - -k3s versions <= v0.5.0 used `k3s.cattle.io` for the api group of helmcharts, this has been changed to `helm.cattle.io` for later versions. - -Accessing Cluster from Outside ------------------------------ - -Copy `/etc/rancher/k3s/k3s.yaml` on your machine located outside the cluster as `~/.kube/config`. Then replace -"localhost" with the IP or name of your k3s server. `kubectl` can now manage your k3s cluster. - -Open Ports / Network Security ---------------------------- - -The server needs port 6443 to be accessible by the nodes. The nodes need to be able to reach -other nodes over UDP port 8472. This is used for flannel VXLAN. If you don't use flannel -and provide your own custom CNI, then 8472 is not needed by k3s. The node should not listen -on any other port. 
k3s uses reverse tunneling such that the nodes make outbound connections -to the server and all kubelet traffic runs through that tunnel. - -IMPORTANT. The VXLAN port on nodes should not be exposed to the world, it opens up your -cluster network to accessed by anyone. Run your nodes behind a firewall/security group that -disables access to port 8472. - -Node Registration ------------------ - -Agents will register with the server using the node cluster secret along with a randomly generated -password for the node, stored at `/var/lib/rancher/k3s/agent/node-password.txt`. The server will -store the passwords for individual nodes at `/var/lib/rancher/k3s/server/cred/node-passwd`, and any -subsequent attempts must use the same password. If the data directory of an agent is removed the -password file should be recreated for the agent, or the entry removed from the server. - -Containerd and Docker ----------- - -k3s includes and defaults to containerd. Why? Because it's just plain better. If you want to -run with Docker first stop and think, "Really? Do I really want more headache?" If still -yes then you just need to run the agent with the `--docker` flag. - -k3s will generate config.toml for containerd in `/var/lib/rancher/k3s/agent/etc/containerd/config.toml`, for advanced customization for this file you can create another file called `config.toml.tmpl` in the same directory and it will be used instead. - -The `config.toml.tmpl` will be treated as a Golang template file, and the `config.Node` structure is being passed to the template, the following is an example on how to use the structure to customize the configuration file https://github.com/rancher/k3s/blob/master/pkg/agent/templates/templates.go#L16-L32 - -Rootless --------- - -_**WARNING**:_ Some advanced magic, user beware - -Initial rootless support has been added but there are a series of significant usability issues surrounding it. -We are releasing the initial support for those interested in rootless and hopefully some people can help to -improve the usability. First ensure you have proper setup and support for user namespaces. Refer to the -[requirements section](https://github.com/rootless-containers/rootlesskit#setup) in RootlessKit for instructions. -In short, latest Ubuntu is your best bet for this to work. - - -**Issues w/ Rootless**: - -* **Ports** - - When running rootless a new network namespace is created. This means that k3s instance is running with networking - fairly detached from the host. The only way to access services run in k3s from the host is to setup port forwards - to the k3s network namespace. We have a controller that will automatically bind 6443 and service port below 1024 to the host with an offset of 10000. - - That means service port 80 will become 10080 on the host, but 8080 will become 8080 without any offset. - - Currently, only `LoadBalancer` services are automatically bound. - -* **Daemon lifecycle** - - Once you kill k3s and then start a new instance of k3s it will create a new network namespace, but it doesn't kill the old pods. So you are left - with a fairly broken setup. This is the main issue at the moment, how to deal with the network namespace. - - The issue is tracked in https://github.com/rootless-containers/rootlesskit/issues/65 - -* **Cgroups** - - Cgroups are not supported - -**Running w/ Rootless**: - -Just add `--rootless` flag to either server or agent. 
So run `k3s server --rootless` and then look for the message -`Wrote kubeconfig [SOME PATH]` for where your kubeconfig to access you cluster is. Be careful, if you use `-o` to write -the kubeconfig to a different directory it will probably not work. This is because the k3s instance in running in a different -mount namespace. - -Node Labels and Taints ----------------------- - -k3s agents can be configured with options `--node-label` and `--node-taint` which adds set of Labels and Taints to kubelet, the two options only adds labels/taints at registration time, so they can only be added once and not changed after that, an example of options to add new label is: -``` - --node-label foo=bar \ - --node-label hello=world \ - --node-taint key1=value1:NoExecute -``` - -Flannel -------- - -Flannel is included by default, if you don't want flannel then run the agent with `--no-flannel` option. - -In this setup you will still be required to install your own CNI driver. More info [here](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network) - -CoreDNS -------- - -CoreDNS is deployed on start of the agent, to disable run the server with the `--no-deploy coredns` option. - -If you don't install CoreDNS you will need to install a cluster DNS provider yourself. - -Traefik -------- - -Traefik is deployed by default when starting the server; to disable it, start the server with the `--no-deploy traefik` option. - -Service Load Balancer ---------------------- - -k3s includes a basic service load balancer that uses available host ports. If you try to create -a load balancer that listens on port 80, for example, it will try to find a free host in the cluster -for port 80. If no port is available the load balancer will stay in Pending. - -To disable the embedded load balancer run the server with the `--no-deploy servicelb` option. This is necessary if you wish to run a different load balancer, such as MetalLB. - -Metrics Server --------------- - -To add functionality for commands such as `k3s kubectl top nodes` metrics-server must be installed, -to install see the instructions located at https://github.com/kubernetes-incubator/metrics-server/. - -**NOTE** : By default the image used in `metrics-server-deployment.yaml` is valid only for **amd64** devices, -this should be edited as appropriate for your architecture. As of this writing metrics-server provides -the following images relevant to k3s: `amd64:v0.3.3`, `arm64:v0.3.2`, and `arm:v0.3.2`. Further information -on the images provided through gcr.io can be found at https://console.cloud.google.com/gcr/images/google-containers/GLOBAL. 
- -Storage Backends ----------------- - -As of version 0.6.0, k3s can support various storage backends including: SQLite (default), MySQL, Postgres, and etcd, this enhancement depends on the following arguments that can be passed to k3s server: - -* `--storage-backend` _value_ - - Specify storage type etcd3 or kvsql [$`K3S_STORAGE_BACKEND`] - -* `--storage-endpoint` _value_ - - Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$`K3S_STORAGE_ENDPOINT`] - -* `--storage-cafile` _value_ - - SSL Certificate Authority file used to secure storage backend communication [$`K3S_STORAGE_CAFILE`] - -* `--storage-certfile` _value_ - - SSL certification file used to secure storage backend communication [$`K3S_STORAGE_CERTFILE`] - -* `--storage-keyfile` _value_ - - SSL key file used to secure storage backend communication [$`K3S_STORAGE_KEYFILE`] - -### MySQL - -To use k3s with MySQL storage backend, you can specify the following for insecure connection: - -``` - --storage-endpoint="mysql://" -``` -By default the server will attempt to connect to mysql using the mysql socket at `/var/run/mysqld/mysqld.sock` using the root user and with no password, k3s will also create a database with the name `kubernetes` if the database is not specified in the DSN. - -To override the method of connection, user/pass, and database name, you can provide a custom DSN, for example: - -``` - --storage-endpoint="mysql://k3suser:k3spass@tcp(192.168.1.100:3306)/k3stest" -``` - -This command will attempt to connect to MySQL on host `192.168.1.100` on port `3306` with username `k3suser` and password `k3spass` and k3s will automatically create a new database with the name `k3stest` if it doesn't exist, for more information about the MySQL driver data source name, please refer to https://github.com/go-sql-driver/mysql#dsn-data-source-name - -To connect to MySQL securely, you can use the following example: -``` - --storage-endpoint="mysql://k3suser:k3spass@tcp(192.168.1.100:3306)/k3stest" \ - --storage-cafile ca.crt \ - --storage-certfile mysql.crt \ - --storage-keyfile mysql.key -``` -The above command will use these certificates to generate the tls config to communicate with mysql securely. - - -### Postgres - -Connection to postgres can be established using the following command: - -``` - --storage-endpoint="postgres://" -``` - -By default the server will attempt to connect to postgres on localhost with using the `postgres` user and with `postgres` password, k3s will also create a database with the name `kubernetes` if the database is not specified in the DSN. - -To override the method of connection, user/pass, and database name, you can provide a custom DSN, for example: - -``` - --storage-endpoint="postgres://k3suser:k3spass@192.168.1.100:5432/k3stest" -``` - -This command will attempt to connect to Postgres on host `192.168.1.100` on port `5432` with username `k3suser` and password `k3spass` and k3s will automatically create a new database with the name `k3stest` if it doesn't exist, for more information about the Postgres driver data source name, please refer to https://godoc.org/github.com/lib/pq - -To connect to Postgres securely, you can use the following example: - -``` - --storage-endpoint="postgres://k3suser:k3spass@192.168.1.100:5432/k3stest" \ - --storage-certfile postgres.crt \ - --storage-keyfile postgres.key \ - --storage-cafile ca.crt -``` - -The above command will use these certificates to generate the tls config to communicate with postgres securely. 
- -### etcd - -Connection to etcd3 can be established using the following command: - -``` - --storage-backend=etcd3 \ - --storage-endpoint="https://127.0.0.1:2379" -``` -The above command will attempt to connect insecurely to etcd on localhost with port `2379`, you can connect securely to etcd using the following command: - -``` - --storage-backend=etcd3 \ - --storage-endpoint="https://127.0.0.1:2379" \ - --storage-cafile ca.crt \ - --storage-certfile etcd.crt \ - --storage-keyfile etcd.key -``` - -The above command will use these certificates to generate the tls config to communicate with etcd securely. diff --git a/content/k3s/latest/en/faq/_index.md b/content/k3s/latest/en/faq/_index.md new file mode 100644 index 00000000000..9de75aa8645 --- /dev/null +++ b/content/k3s/latest/en/faq/_index.md @@ -0,0 +1,22 @@ +--- +title: FAQ +weight: 60 +--- + +The FAQ is updated periodically and designed to answer the questions our users most frequently ask about K3s. + +**Is K3s a suitable replacement for k8s?** + +K3s is capable of nearly everything k8s can do. It is just a more lightweight version. See the [main]({{< baseurl >}}/k3s/latest/en/) docs page for more details. + +**How can I use my own Ingress instead of Traefik?** + +Simply start the K3s server with `--no-deploy=traefik` and deploy your ingress. + +**Does K3s support Windows?** + +At this time, K3s does not natively support Windows; however, we are open to the idea in the future. + +**How can I build from source?** + +Please refer to the K3s [BUILDING.md](https://github.com/rancher/k3s/blob/master/BUILDING.md) for instructions. diff --git a/content/k3s/latest/en/helm/_index.md b/content/k3s/latest/en/helm/_index.md new file mode 100644 index 00000000000..89231313818 --- /dev/null +++ b/content/k3s/latest/en/helm/_index.md @@ -0,0 +1,101 @@ +--- +title: Helm +weight: 42 +--- + +K3s release _v1.17.0+k3s.1_ added support for Helm 3. You can access the Helm 3 documentation [here](https://helm.sh/docs/intro/quickstart/). + +Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. + +K3s does not require any special configuration to start using Helm 3. Just be sure you have properly set up your kubeconfig as per the section about [cluster access.](../cluster-access) + +This section covers the following topics: + +- [Upgrading Helm](#upgrading-helm) +- [Deploying manifests and Helm charts](#deploying-manifests-and-helm-charts) +- [Using the Helm CRD](#using-the-helm-crd) + +### Upgrading Helm + +If you were using Helm v2 in previous versions of K3s, you may upgrade to v1.17.0+k3s.1 or newer and Helm 2 will still function. If you wish to migrate to Helm 3, [this](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) blog post by Helm explains how to use a plugin to successfully migrate. Refer to the official Helm 3 documentation [here](https://helm.sh/docs/) for more information. K3s will handle either Helm v2 or Helm v3 as of v1.17.0+k3s.1. Just be sure you have properly set your kubeconfig as per the examples in the section about [cluster access.](../cluster-access) + +Note that Helm 3 no longer requires Tiller and the `helm init` command. Refer to the official documentation for details.
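+
+As a rough sketch of that migration path (assuming the `helm-2to3` plugin described in the linked blog post; `RELEASE_NAME` is a placeholder for one of your existing Helm v2 releases), the commands look roughly like this:
+
+```bash
+# install the Helm v2-to-v3 migration plugin
+helm plugin install https://github.com/helm/helm-2to3
+
+# copy Helm v2 configuration (repositories, plugins) over to Helm 3
+helm 2to3 move config
+
+# convert an existing v2 release, then clean up leftover v2 data once everything is migrated
+helm 2to3 convert RELEASE_NAME
+helm 2to3 cleanup
+```
+
+Check the plugin's own documentation before running these against a real cluster, since `cleanup` removes Helm v2 release data.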
+ +### Deploying Manifests and Helm Charts + +Any file found in `/var/lib/rancher/k3s/server/manifests` will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. + +It is also possible to deploy Helm charts. K3s supports a CRD controller for installing charts. A YAML file specification can look as follows (example taken from `/var/lib/rancher/k3s/server/manifests/traefik.yaml`): + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: traefik + namespace: kube-system +spec: + chart: stable/traefik + set: + rbac.enabled: "true" + ssl.enabled: "true" +``` + +Keep in mind that `namespace` in your HelmChart resource metadata section should always be `kube-system`, because the K3s deploy controller is configured to watch this namespace for new HelmChart resources. If you want to specify the namespace for the actual Helm release, you can do that using the `targetNamespace` key under the `spec` directive, as shown in the configuration example below. + +> **Note:** In order for the Helm Controller to know which version of Helm to use to auto-deploy a Helm app, please specify the `helmVersion` in the spec of your YAML file. + +Also note that besides `set`, you can use `valuesContent` under the `spec` directive. And it's okay to use both of them: + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: grafana + namespace: kube-system +spec: + chart: stable/grafana + targetNamespace: monitoring + set: + adminPassword: "NotVerySafePassword" + valuesContent: |- + image: + tag: master + env: + GF_EXPLORE_ENABLED: true + adminUser: admin + sidecar: + datasources: + enabled: true +``` + +K3s versions `<= v0.5.0` used `k3s.cattle.io` for the API group of HelmCharts. This has been changed to `helm.cattle.io` for later versions. + +### Using the Helm CRD + +You can deploy a third-party Helm chart using an example like this: + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: nginx + namespace: kube-system +spec: + chart: nginx + repo: https://charts.bitnami.com/bitnami + targetNamespace: default +``` + +You can install a specific version of a Helm chart using an example like this: + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: nginx-ingress + namespace: kube-system +spec: + chart: nginx-ingress + version: 1.24.4 + targetNamespace: default +``` \ No newline at end of file diff --git a/content/k3s/latest/en/installation/_index.md b/content/k3s/latest/en/installation/_index.md index 22d99bf2cd7..3a6fb03fa7f 100644 --- a/content/k3s/latest/en/installation/_index.md +++ b/content/k3s/latest/en/installation/_index.md @@ -1,349 +1,19 @@ --- -title: "Installation Options" -weight: 2 +title: "Installation" +weight: 20 --- -This section contains information on flags and environment variables used for starting a k3s cluster. +This section contains instructions for installing K3s in various environments. Please ensure you have met the [Node Requirements]({{< baseurl >}}/k3s/latest/en/installation/node-requirements/) before you begin installing K3s. -Install Script -------------- +[Installation and Configuration Options]({{< baseurl >}}/k3s/latest/en/installation/install-options/) provides guidance on the options available to you when installing K3s.
-The install script will attempt to download the latest release, to specify a specific -version for download we can use the `INSTALL_K3S_VERSION` environment variable, for example: -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - -``` -To install just the server without an agent we can add a `INSTALL_K3S_EXEC` -environment variable to the command: -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable-agent" sh - -``` +[High Availability with an External DB]({{< baseurl >}}/k3s/latest/en/installation/ha/) details how to set up an HA K3s cluster backed by an external datastore such as MySQL, PostgreSQL, or etcd. -The installer can also be run without performing downloads by setting `INSTALL_K3S_SKIP_DOWNLOAD=true`, for example: -```sh -curl -sfL https://github.com/rancher/k3s/releases/download/vX.Y.Z/k3s -o /usr/local/bin/k3s -chmod 0755 /usr/local/bin/k3s +[High Availability with Embedded DB (Experimental)]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded/) details how to set up an HA K3s cluster that leverages a built-in distributed database. -curl -sfL https://get.k3s.io -o install-k3s.sh -chmod 0755 install-k3s.sh +[Air-Gap Installation]({{< baseurl >}}/k3s/latest/en/installation/airgap/) details how to set up K3s in environments that do not have direct access to the Internet. -export INSTALL_K3S_SKIP_DOWNLOAD=true -./install-k3s.sh -``` +### Uninstalling -The full help text for the install script environment variables are as follows: - - `K3S_*` - - Environment variables which begin with `K3S_` will be preserved for the - systemd service to use. Setting `K3S_URL` without explicitly setting - a systemd exec command will default the command to "agent", and we - enforce that `K3S_TOKEN` or `K3S_CLUSTER_SECRET` is also set. - - - `INSTALL_K3S_SKIP_DOWNLOAD` - - If set to true will not download k3s hash or binary. - - - INSTALL_K3S_SYMLINK - - If set to 'skip' will not create symlinks, 'force' will overwrite, - default will symlink if command does not exist in path. - - - `INSTALL_K3S_VERSION` - - Version of k3s to download from github. Will attempt to download the - latest version if not specified. - - - `INSTALL_K3S_BIN_DIR` - - Directory to install k3s binary, links, and uninstall script to, or use - /usr/local/bin as the default - - - `INSTALL_K3S_SYSTEMD_DIR` - - Directory to install systemd service and environment files to, or use - /etc/systemd/system as the default - - - `INSTALL_K3S_EXEC` or script arguments - - Command with flags to use for launching k3s in the systemd service, if - the command is not specified will default to "agent" if `K3S_URL` is set - or "server" if not. The final systemd command resolves to a combination - of EXEC and script args ($@). - - The following commands result in the same behavior: - ```sh - curl ... | INSTALL_K3S_EXEC="--disable-agent" sh -s - - curl ... | INSTALL_K3S_EXEC="server --disable-agent" sh -s - - curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable-agent - curl ... | sh -s - server --disable-agent - curl ... | sh -s - --disable-agent - ``` - - - `INSTALL_K3S_NAME` - - Name of systemd service to create, will default from the k3s exec command - if not specified. If specified the name will be prefixed with 'k3s-'. - - - `INSTALL_K3S_TYPE` - - Type of systemd service to create, will default from the k3s exec command - if not specified. 
- -Server Options --------------- - -The following information on server options is also available through `k3s server --help` : - -* `--bind-address` _value_ - - k3s bind address (default: localhost) - -* `--https-listen-port` _value_ - - HTTPS listen port (default: 6443) - -* `--http-listen-port` _value_ - - HTTP listen port (for /healthz, HTTPS redirect, and port for TLS terminating LB) (default: 0) - -* `--data-dir` _value_, `-d` _value_ - - Folder to hold state default /var/lib/rancher/k3s or ${HOME}/.rancher/k3s if not root - -* `--disable-agent` - - Do not run a local agent and register a local kubelet - -* `--log` _value_, `-l` _value_ - - Log to file - -* `--cluster-cidr` _value_ - - Network CIDR to use for pod IPs (default: "10.42.0.0/16") - -* `--cluster-secret` _value_ - - Shared secret used to bootstrap a cluster [$`K3S_CLUSTER_SECRET`] - -* `--service-cidr` _value_ - - Network CIDR to use for services IPs (default: "10.43.0.0/16") - -* `--cluster-dns` _value_ - - Cluster IP for coredns service. Should be in your service-cidr range - -* `--cluster-domain` _value_ - - Cluster Domain (default: "cluster.local") - -* `--no-deploy` _value_ - - Do not deploy packaged components (valid items: coredns, servicelb, traefik) - -* `--write-kubeconfig` _value_, `-o` _value_ - - Write kubeconfig for admin client to this file [$`K3S_KUBECONFIG_OUTPUT`] - -* `--write-kubeconfig-mode` _value_ - - Write kubeconfig with this mode [$`K3S_KUBECONFIG_MODE`] - -* `--tls-san` _value_ - - Add additional hostname or IP as a Subject Alternative Name in the TLS cert - -* `--kube-apiserver-arg` _value_ - - Customized flag for kube-apiserver process - -* `--kube-scheduler-arg` _value_ - - Customized flag for kube-scheduler process - -* `--kube-controller-arg` _value_ - - Customized flag for kube-controller-manager process - -* `--rootless` - - (experimental) Run rootless - -* `--storage-backend` _value_ - - Specify storage type etcd3 or kvsql [$`K3S_STORAGE_BACKEND`] - -* `--storage-endpoint` _value_ - - Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$`K3S_STORAGE_ENDPOINT`] - -* `--storage-cafile` _value_ - - SSL Certificate Authority file used to secure storage backend communication [$`K3S_STORAGE_CAFILE`] - -* `--storage-certfile` _value_ - - SSL certification file used to secure storage backend communication [$`K3S_STORAGE_CERTFILE`] - -* `--storage-keyfile` _value_ - - SSL key file used to secure storage backend communication [$`K3S_STORAGE_KEYFILE`] - -* `--node-ip` _value_, `-i` _value_ - - (agent) IP address to advertise for node - -* `--node-name` _value_ - - (agent) Node name [$`K3S_NODE_NAME`] - -* `--docker` - - (agent) Use docker instead of containerd - -* `--no-flannel` - - (agent) Disable embedded flannel - -* `--flannel-iface` _value_ - - (agent) Override default flannel interface - -* `--container-runtime-endpoint` _value_ - - (agent) Disable embedded containerd and use alternative CRI implementation - -* `--pause-image` _value_ - - (agent) Customized pause image for containerd sandbox - -* `--resolv-conf` _value_ - - (agent) Kubelet resolv.conf file [$`K3S_RESOLV_CONF`] - -* `--kubelet-arg` _value_ - - (agent) Customized flag for kubelet process - -* `--kube-proxy-arg` _value_ - - (agent) Customized flag for kube-proxy process - -* `--node-label` _value_ - - (agent) Registering kubelet with set of labels - -* `--node-taint` _value_ - - (agent) Registering kubelet with set of taints - -Agent Options ------------------- - -The following information on agent options is 
also available through `k3s agent --help` : - -* `--token` _value_, `-t` _value_ - - Token to use for authentication [$`K3S_TOKEN`] - -* `--token-file` _value_ - - Token file to use for authentication [$`K3S_TOKEN_FILE`] - -* `--server` _value_, `-s` _value_ - - Server to connect to [$`K3S_URL`] - -* `--data-dir` _value_, `-d` _value_ - - Folder to hold state (default: "/var/lib/rancher/k3s") - -* `--cluster-secret` _value_ - - Shared secret used to bootstrap a cluster [$`K3S_CLUSTER_SECRET`] - -* `--rootless` - - (experimental) Run rootless - -* `--docker` - - (agent) Use docker instead of containerd - -* `--no-flannel` - - (agent) Disable embedded flannel - -* `--flannel-iface` _value_ - - (agent) Override default flannel interface - -* `--node-name` _value_ - - (agent) Node name [$`K3S_NODE_NAME`] - -* `--node-ip` _value_, `-i` _value - - (agent) IP address to advertise for node - -* `--container-runtime-endpoint` _value_ - - (agent) Disable embedded containerd and use alternative CRI implementation - -* `--pause-image` _value_ - - (agent) Customized pause image for containerd sandbox - -* `--resolv-conf` _value_ - - (agent) Kubelet resolv.conf file [$`K3S_RESOLV_CONF`] - -* `--kubelet-arg` _value_ - - (agent) Customized flag for kubelet process - -* `--kube-proxy-arg` _value_ - - (agent) Customized flag for kube-proxy process - -* `--node-label` _value_ - - (agent) Registering kubelet with set of labels - -* `--node-taint` _value_ - - (agent) Registering kubelet with set of taints - -Customizing components ----------------------- - -As of v0.3.0 any of the following processes can be customized with extra flags: - -* `--kube-apiserver-arg` _value_ - - (server) [kube-apiserver options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) - -* `--kube-controller-arg` _value_ - - (server) [kube-controller-manager options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) - -* `--kube-scheduler-arg` _value_ - - (server) [kube-scheduler options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) - -* `--kubelet-arg` _value_ - - (agent) [kubelet options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) - -* `--kube-proxy-arg` _value_ - - (agent) [kube-proxy options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/) - -Adding extra arguments can be done by passing the following flags to server or agent. -For example to add the following arguments `-v=9` and `log-file=/tmp/kubeapi.log` to the kube-apiserver, you should add the following options to k3s server: - -``` ---kube-apiserver-arg v=9 --kube-apiserver-arg log-file=/tmp/kubeapi.log -``` +If you installed K3s with the help of the `install.sh` script, an uninstall script is generated during installation. The script is created on your node at `/usr/local/bin/k3s-uninstall.sh` (or as `k3s-agent-uninstall.sh`). diff --git a/content/k3s/latest/en/installation/airgap/_index.md b/content/k3s/latest/en/installation/airgap/_index.md new file mode 100644 index 00000000000..66564948a5d --- /dev/null +++ b/content/k3s/latest/en/installation/airgap/_index.md @@ -0,0 +1,79 @@ +--- +title: "Air-Gap Install" +weight: 60 +--- + +In this guide, we are assuming you have created your nodes in your air-gap environment and have a secure Docker private registry on your bastion server. + +# Installation Outline + +1. [Prepare Images Directory](#prepare-images-directory) +2. 
[Create Registry YAML](#create-registry-yaml) +3. [Install K3s](#install-k3s) + +### Prepare Images Directory +Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. + +Place the tar file in the `images` directory before starting K3s on each node, for example: + +```sh +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ +``` + +### Create Registry YAML +Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. +The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +Note: At this time, only secure registries (SSL with a custom CA) are supported with K3s. + +### Install K3s + +Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. +Also obtain the K3s install script at https://get.k3s.io + +Place the binary in `/usr/local/bin` on each node. +Place the install script anywhere on each node, and name it `install.sh`. + +Install K3s on each server: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh +``` + +Install K3s on each agent: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh +``` + +Note: Take care to replace `myserver` with the IP or valid DNS name of the server and `mynodetoken` with the node-token from the server. +The node-token is on the server at `/var/lib/rancher/k3s/server/node-token`. + + +>**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. + +# Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. +2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. +3. Restart the K3s service (if it was not restarted automatically by the installer). diff --git a/content/k3s/latest/en/installation/datastore/_index.md b/content/k3s/latest/en/installation/datastore/_index.md new file mode 100644 index 00000000000..63ef6baa32b --- /dev/null +++ b/content/k3s/latest/en/installation/datastore/_index.md @@ -0,0 +1,97 @@ +--- +title: "Cluster Datastore Options" +weight: 50 +--- + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available datastore options allow you to select a datastore that best fits your use case.
For example: + +* If your team doesn't have expertise in operating etcd, you can choose an enterprise-grade SQL database like MySQL or PostgreSQL +* If you need to run a simple, short-lived cluster in your CI/CD environment, you can use the embedded SQLite database +* If you wish to deploy Kubernetes on the edge and require a highly available solution but can't afford the operational overhead of managing a database at the edge, you can use K3s's embedded HA datastore built on top of DQLite (currently experimental) + +K3s supports the following datastore options: + +* Embedded [SQLite](https://www.sqlite.org/index.html) +* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) +* [MySQL](https://www.mysql.com/) (certified against version 5.7) +* [etcd](https://etcd.io/) (certified against version 3.3.15) +* Embedded [DQLite](https://dqlite.io/) for High Availability (experimental) + +### External Datastore Configuration Parameters +If you wish to use an external datastore such as PostgreSQL, MySQL, or etcd, you must set the `datastore-endpoint` parameter so that K3s knows how to connect to it. You may also specify parameters to configure the authentication and encryption of the connection. The table below summarizes these parameters, which can be passed as either CLI flags or environment variables. + + CLI Flag | Environment Variable | Description + ------------|-------------|------------------ + `--datastore-endpoint` | `K3S_DATASTORE_ENDPOINT` | Specify a PostgreSQL, MySQL, or etcd connection string. This is a string used to describe the connection to the datastore. The structure of this string is specific to each backend and is detailed below. + `--datastore-cafile` | `K3S_DATASTORE_CAFILE` | TLS Certificate Authority (CA) file used to help secure communication with the datastore. If your datastore serves requests over TLS using a certificate signed by a custom certificate authority, you can specify that CA using this parameter so that the K3s client can properly verify the certificate. + `--datastore-certfile` | `K3S_DATASTORE_CERTFILE` | TLS certificate file used for client certificate based authentication to your datastore. To use this feature, your datastore must be configured to support client certificate based authentication. If you specify this parameter, you must also specify the `datastore-keyfile` parameter. + `--datastore-keyfile` | `K3S_DATASTORE_KEYFILE` | TLS key file used for client certificate based authentication to your datastore. See the previous `datastore-certfile` parameter for more details. + +As a best practice, we recommend setting these parameters as environment variables rather than command line arguments so that your database credentials or other sensitive information aren't exposed as part of the process info. + +### Datastore Endpoint Format and Functionality +As mentioned, the format of the value passed to the `datastore-endpoint` parameter is dependent upon the datastore backend. The following details this format and functionality for each supported external datastore. + +{{% tabs %}} +{{% tab "PostgreSQL" %}} + +In its most common form, the `datastore-endpoint` parameter for PostgreSQL has the following format: + +`postgres://username:password@hostname:port/database-name` + +More advanced configuration parameters are available. For more information on these, please see https://godoc.org/github.com/lib/pq. + +If you specify a database name and it does not exist, the server will attempt to create it.
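+For illustration, a hypothetical endpoint that uses one of those advanced parameters (`sslmode`, as documented by the lib/pq driver linked above) to require a fully verified TLS connection could look like the following; the credentials, hostname, and database name are placeholders:
+
+```
+K3S_DATASTORE_ENDPOINT='postgres://username:password@hostname:5432/k3s?sslmode=verify-full' k3s server
+```
+
+With `sslmode=verify-full` you would typically also supply the CA certificate via `--datastore-cafile` (or `K3S_DATASTORE_CAFILE`), as described in the table above.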
+ +If you only supply `postgres://` as the endpoint, K3s will attempt to do the following: + +* Connect to localhost using `postgres` as the username and password +* Create a database named `kubernetes` + + +{{% /tab %}} +{{% tab "MySQL" %}} + +In its most common form, the `datastore-endpoint` parameter for MySQL has the following format: + +`mysql://username:password@tcp(hostname:3306)/database-name` + +More advanced configuration parameters are available. For more information on these, please see https://github.com/go-sql-driver/mysql#dsn-data-source-name. + +Note that due to a [known issue](https://github.com/rancher/k3s/issues/1093) in K3s, you cannot set the `tls` parameter. TLS communication is supported, but you cannot, for example, set this parameter to "skip-verify" to cause K3s to skip certificate verification. + +If you specify a database name and it does not exist, the server will attempt to create it. + +If you only supply `mysql://` as the endpoint, K3s will attempt to do the following: + +* Connect to the MySQL socket at `/var/run/mysqld/mysqld.sock` using the `root` user and no password +* Create a database with the name `kubernetes` + + +{{% /tab %}} +{{% tab "etcd" %}} + +In its most common form, the `datastore-endpoint` parameter for etcd has the following format: + +`https://etcd-host-1:2379,https://etcd-host-2:2379,https://etcd-host-3:2379` + +The above assumes a typical three-node etcd cluster. The parameter can accept one or more comma-separated etcd URLs. + +{{% /tab %}} +{{% /tabs %}} + +
Based on the above, the following example command could be used to launch a server instance that connects to a PostgreSQL database named k3s: +``` +K3S_DATASTORE_ENDPOINT='postgres://username:password@hostname:5432/k3s' k3s server +``` + +And the following example could be used to connect to a MySQL database using client certificate authentication: +``` +K3S_DATASTORE_ENDPOINT='mysql://username:password@tcp(hostname:3306)/k3s' \ +K3S_DATASTORE_CERTFILE='/path/to/client.crt' \ +K3S_DATASTORE_KEYFILE='/path/to/client.key' \ +k3s server +``` + +### Embedded DQLite for HA (Experimental) +K3s's use of DQLite is similar to its use of SQLite. It is simple to set up and manage. As such, no external configuration or additional steps are needed to use this option. Please see [High Availability with Embedded DB (Experimental)]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded/) for instructions on how to run with this option. diff --git a/content/k3s/latest/en/installation/ha-embedded/_index.md b/content/k3s/latest/en/installation/ha-embedded/_index.md new file mode 100644 index 00000000000..58526b65280 --- /dev/null +++ b/content/k3s/latest/en/installation/ha-embedded/_index.md @@ -0,0 +1,22 @@ +--- +title: "High Availability with Embedded DB (Experimental)" +weight: 40 +--- + +As of v1.0.0, K3s is previewing support for running a highly available control plane without the need for an external database. This means there is no need to manage an external etcd or SQL datastore in order to run a reliable production-grade setup. While this feature is currently experimental, we expect it to be the primary architecture for running HA K3s clusters in the future. + +This architecture is achieved by embedding a dqlite database within the K3s server process. DQLite is short for "distributed SQLite." According to https://dqlite.io, it is "*a fast, embedded, persistent SQL database with Raft consensus that is perfect for fault-tolerant IoT and Edge devices.*" This makes it a natural fit for K3s. + +To run K3s in this mode, you must have an odd number of server nodes. We recommend starting with three nodes. + +To get started, first launch a server node with the `cluster-init` flag to enable clustering and a token that will be used as a shared secret to join additional servers to the cluster. +``` +K3S_TOKEN=SECRET k3s server --cluster-init +``` + +After launching the first server, join the second and third servers to the cluster using the shared secret: +``` +K3S_TOKEN=SECRET k3s server --server https://<ip or hostname of server1>:6443 +``` + +Now you have a highly available control plane. Joining additional worker nodes to the cluster follows the same procedure as a single server cluster. diff --git a/content/k3s/latest/en/installation/ha/_index.md b/content/k3s/latest/en/installation/ha/_index.md new file mode 100644 index 00000000000..adea8ad1938 --- /dev/null +++ b/content/k3s/latest/en/installation/ha/_index.md @@ -0,0 +1,71 @@ +--- +title: High Availability with an External DB +weight: 30 +--- + +>**Note:** Official support for installing Rancher on a Kubernetes cluster was introduced in our v1.0.0 release. + +This section describes how to install a high-availability K3s cluster with an external database. + +Single server clusters can meet a variety of use cases, but for environments where uptime of the Kubernetes control plane is critical, you can run K3s in an HA configuration.
An HA K3s cluster is comprised of: + +* Two or more **server nodes** that will serve the Kubernetes API and run other control plane services +* Zero or more **agent nodes** that are designated to run your apps and services +* An **external datastore** (as opposed to the embedded SQLite datastore used in single-server setups) +* A **fixed registration address** that is placed in front of the server nodes to allow agent nodes to register with the cluster + +For more details on how these components work together, refer to the [architecture section.]({{}}/k3s/latest/en/architecture/#high-availability-with-an-external-db) + +Agents register through the fixed registration address, but after registration they establish a connection directly to one of the server nodes. This is a websocket connection initiated by the `k3s agent` process and it is maintained by a client-side load balancer running as part of the agent process. + +# Installation Outline + +Setting up an HA cluster requires the following steps: + +1. [Create an external datastore](#1-create-an-external-datastore) +2. [Launch server nodes](#2-launch-server-nodes) +3. [Configure the fixed registration address](#3-configure-the-fixed-registration-address) +4. [Join agent nodes](#4-optional-join-agent-nodes) + +### 1. Create an External Datastore +You will first need to create an external datastore for the cluster. See the [Cluster Datastore Options]({{< baseurl >}}/k3s/latest/en/installation/datastore/) documentation for more details. + +### 2. Launch Server Nodes +K3s requires two or more server nodes for this HA configuration. See the [Node Requirements]({{< baseurl >}}/k3s/latest/en/installation/node-requirements/) guide for minimum machine requirements. + +When running the `k3s server` command on these nodes, you must set the `datastore-endpoint` parameter so that K3s knows how to connect to the external datastore. + +For example, a command like the following could be used to install the K3s server with a MySQL database as the external datastore: + +``` +curl -sfL https://get.k3s.io | sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" +``` + +The datastore endpoint format differs based on the database type. For details, refer to the section on [datastore endpoint formats.]({{}}/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality) + +To configure TLS certificates when launching server nodes, refer to the [datastore configuration guide.]({{}}/k3s/latest/en/installation/datastore/#external-datastore-configuration-parameters) + +> **Note:** The same installation options available to single-server installs are also available for high-availability installs. For more details, see the [Installation and Configuration Options]({{}}/k3s/latest/en/installation/install-options/) documentation. + +By default, server nodes will be schedulable and thus your workloads can get launched on them. If you wish to have a dedicated control plane where no user workloads will run, you can use taints. The `node-taint` parameter will allow you to configure nodes with taints, for example `--node-taint k3s-controlplane=true:NoExecute`. + +Once you've launched the `k3s server` process on all server nodes, ensure that the cluster has come up properly with `k3s kubectl get nodes`. You should see your server nodes in the Ready state. + +### 3. Configure the Fixed Registration Address +Agent nodes need a URL to register against. 
This can be the IP or hostname of any of the server nodes, but in many cases those may change over time. For example, if you are running your cluster in a cloud that supports scaling groups, you may scale the server node group up and down over time, causing nodes to be created and destroyed and thus having different IPs from the initial set of server nodes. Therefore, you should have a stable endpoint in front of the server nodes that will not change over time. This endpoint can be set up using any number approaches, such as: + +* A layer-4 (TCP) load balancer +* Round-robin DNS +* Virtual or elastic IP addresses + +This endpoint can also be used for accessing the Kubernetes API. So you can, for example, modify your [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file to point to it instead of a specific node. + +### 4. Optional: Join Agent Nodes + +Because K3s server nodes are schedulable by default, the minimum number of nodes for an HA K3s server cluster is two server nodes and zero agent nodes. To add nodes designated to run your apps and services, join agent nodes to your cluster. + +Joining agent nodes in an HA cluster is the same as joining agent nodes in a single server cluster. You just need to specify the URL the agent should register to and the token it should use. +``` +K3S_TOKEN=SECRET k3s agent --server https://fixed-registration-address:6443 +``` diff --git a/content/k3s/latest/en/installation/install-options/_index.md b/content/k3s/latest/en/installation/install-options/_index.md new file mode 100644 index 00000000000..424cbd9ae73 --- /dev/null +++ b/content/k3s/latest/en/installation/install-options/_index.md @@ -0,0 +1,208 @@ +--- +title: "Installation Options" +weight: 20 +--- + +This page focuses on the options that can be used when you set up K3s for the first time: + +- [Installation script options](#installation-script-options) +- [Installing K3s from the binary](#installing-k3s-from-the-binary) +- [Registration options for the K3s server](#registration-options-for-the-k3s-server) +- [Registration options for the K3s agent](#registration-options-for-the-k3s-agent) + +For more advanced options, refer to [this page.]({{}}/k3s/latest/en/advanced) + +# Installation Script Options + +As mentioned in the [Quick-Start Guide]({{< baseurl >}}/k3s/latest/en/quick-start/), you can use the installation script available at https://get.k3s.io to install K3s as a service on systemd and openrc based systems. + +The simplest form of this command is as follows: +```sh +curl -sfL https://get.k3s.io | sh - +``` + +When using this method to install K3s, the following environment variables can be used to configure the installation: + +- `INSTALL_K3S_SKIP_DOWNLOAD` + + If set to true will not download K3s hash or binary. + +- `INSTALL_K3S_SYMLINK` + + If set to 'skip' will not create symlinks, 'force' will overwrite, default will symlink if command does not exist in path. + +- `INSTALL_K3S_SKIP_START` + + If set to true will not start K3s service. + +- `INSTALL_K3S_VERSION` + + Version of K3s to download from github. Will attempt to download the latest version if not specified. + +- `INSTALL_K3S_BIN_DIR` + + Directory to install K3s binary, links, and uninstall script to, or use `/usr/local/bin` as the default. + +- `INSTALL_K3S_BIN_DIR_READ_ONLY` + + If set to true will not write files to `INSTALL_K3S_BIN_DIR`, forces setting `INSTALL_K3S_SKIP_DOWNLOAD=true`. 
+ +- `INSTALL_K3S_SYSTEMD_DIR` + + Directory to install systemd service and environment files to, or use `/etc/systemd/system` as the default. + +- `INSTALL_K3S_EXEC` + + Command with flags to use for launching K3s in the service. If the command is not specified, it will default to "agent" if `K3S_URL` is set or "server" if it is not set. + + The final systemd command resolves to a combination of this environment variable and script args. To illustrate this, the following commands result in the same behavior of registering a server without flannel: + ```sh + curl ... | INSTALL_K3S_EXEC="--no-flannel" sh -s - + curl ... | INSTALL_K3S_EXEC="server --no-flannel" sh -s - + curl ... | INSTALL_K3S_EXEC="server" sh -s - --no-flannel + curl ... | sh -s - server --no-flannel + curl ... | sh -s - --no-flannel + ``` + + - `INSTALL_K3S_NAME` + + Name of systemd service to create, will default from the K3s exec command if not specified. If specified the name will be prefixed with 'k3s-'. + + - `INSTALL_K3S_TYPE` + + Type of systemd service to create, will default from the K3s exec command if not specified. + + +Environment variables which begin with `K3S_` will be preserved for the systemd and openrc services to use. Setting `K3S_URL` without explicitly setting an exec command will default the command to "agent". When running the agent `K3S_TOKEN` must also be set. + + +# Installing K3s from the Binary + +As stated, the installation script is primarily concerned with configuring K3s to run as a service. If you choose to not use the script, you can run K3s simply by downloading the binary from our [release page](https://github.com/rancher/k3s/releases/latest), placing it on your path, and executing it. The K3s binary supports the following commands: + +Command | Description +--------|------------------ +`k3s server` | Run the K3s management server, which will also launch Kubernetes control plane components such as the API server, controller-manager, and scheduler. +`k3s agent` | Run the K3s node agent. This will cause K3s to run as a worker node, launching the Kubernetes node services `kubelet` and `kube-proxy`. +`k3s kubectl` | Run an embedded [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) CLI. If the `KUBECONFIG` environment variable is not set, this will automatically attempt to use the config file that is created at `/etc/rancher/k3s/k3s.yaml` when launching a K3s server node. +`k3s crictl` | Run an embedded [crictl](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md). This is a CLI for interacting with Kubernetes's container runtime interface (CRI). Useful for debugging. +`k3s ctr` | Run an embedded [ctr](https://github.com/projectatomic/containerd/blob/master/docs/cli.md). This is a CLI for containerd, the container daemon used by K3s. Useful for debugging. +`k3s help` | Shows a list of commands or help for one command + +The `k3s server` and `k3s agent` commands have additional configuration options that can be viewed with `k3s server --help` or `k3s agent --help`. 
For convenience, that help text is presented here: + +# Registration Options for the K3s Server +``` +NAME: + k3s server - Run management server + +USAGE: + k3s server [OPTIONS] + +OPTIONS: + -v value (logging) Number for the log level verbosity (default: 0) + --vmodule value (logging) Comma-separated list of pattern=N settings for file-filtered logging + --log value, -l value (logging) Log to file + --alsologtostderr (logging) Log to standard error as well as file (if set) + --bind-address value (listener) k3s bind address (default: 0.0.0.0) + --https-listen-port value (listener) HTTPS listen port (default: 6443) + --advertise-address value (listener) IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip) + --advertise-port value (listener) Port that apiserver uses to advertise to members of the cluster (default: listen-port) (default: 0) + --tls-san value (listener) Add additional hostname or IP as a Subject Alternative Name in the TLS cert + --data-dir value, -d value (data) Folder to hold state default /var/lib/rancher/k3s or ${HOME}/.rancher/k3s if not root + --cluster-cidr value (networking) Network CIDR to use for pod IPs (default: "10.42.0.0/16") + --service-cidr value (networking) Network CIDR to use for services IPs (default: "10.43.0.0/16") + --cluster-dns value (networking) Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10) + --cluster-domain value (networking) Cluster Domain (default: "cluster.local") + --flannel-backend value (networking) One of 'none', 'vxlan', 'ipsec', or 'flannel' (default: "vxlan") + --token value, -t value (cluster) Shared secret used to join a server or agent to a cluster [$K3S_TOKEN] + --token-file value (cluster) File containing the cluster-secret/token [$K3S_TOKEN_FILE] + --write-kubeconfig value, -o value (client) Write kubeconfig for admin client to this file [$K3S_KUBECONFIG_OUTPUT] + --write-kubeconfig-mode value (client) Write kubeconfig with this mode [$K3S_KUBECONFIG_MODE] + --kube-apiserver-arg value (flags) Customized flag for kube-apiserver process + --kube-scheduler-arg value (flags) Customized flag for kube-scheduler process + --kube-controller-manager-arg value (flags) Customized flag for kube-controller-manager process + --kube-cloud-controller-manager-arg value (flags) Customized flag for kube-cloud-controller-manager process + --datastore-endpoint value (db) Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$K3S_DATASTORE_ENDPOINT] + --datastore-cafile value (db) TLS Certificate Authority file used to secure datastore backend communication [$K3S_DATASTORE_CAFILE] + --datastore-certfile value (db) TLS certification file used to secure datastore backend communication [$K3S_DATASTORE_CERTFILE] + --datastore-keyfile value (db) TLS key file used to secure datastore backend communication [$K3S_DATASTORE_KEYFILE] + --default-local-storage-path value (storage) Default local storage path for local provisioner storage class + --no-deploy value (components) Do not deploy packaged components (valid items: coredns, servicelb, traefik, local-storage, metrics-server) + --disable-scheduler (components) Disable Kubernetes default scheduler + --disable-cloud-controller (components) Disable k3s default cloud controller manager + --disable-network-policy (components) Disable k3s default network policy controller + --node-name value (agent/node) Node name [$K3S_NODE_NAME] + --with-node-id (agent/node) Append id to node name + --node-label value 
(agent/node) Registering kubelet with set of labels + --node-taint value (agent/node) Registering kubelet with set of taints + --docker (agent/runtime) Use docker instead of containerd + --container-runtime-endpoint value (agent/runtime) Disable embedded containerd and use alternative CRI implementation + --pause-image value (agent/runtime) Customized pause image for containerd sandbox + --private-registry value (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml") + --node-ip value, -i value (agent/networking) IP address to advertise for node + --node-external-ip value (agent/networking) External IP address to advertise for node + --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] + --flannel-iface value (agent/networking) Override default flannel interface + --flannel-conf value (agent/networking) Override default flannel config file + --kubelet-arg value (agent/flags) Customized flag for kubelet process + --kube-proxy-arg value (agent/flags) Customized flag for kube-proxy process + --rootless (experimental) Run rootless + --agent-token value (experimental/cluster) Shared secret used to join agents to the cluster, but not servers [$K3S_AGENT_TOKEN] + --agent-token-file value (experimental/cluster) File containing the agent secret [$K3S_AGENT_TOKEN_FILE] + --server value, -s value (experimental/cluster) Server to connect to, used to join a cluster [$K3S_URL] + --cluster-init (experimental/cluster) Initialize new cluster master [$K3S_CLUSTER_INIT] + --cluster-reset (experimental/cluster) Forget all peers and become a single cluster new cluster master [$K3S_CLUSTER_RESET] + --no-flannel (deprecated) use --flannel-backend=none + --cluster-secret value (deprecated) use --token [$K3S_CLUSTER_SECRET] +``` + +# Registration Options for the K3s Agent +``` +NAME: + k3s agent - Run node agent + +USAGE: + k3s agent [OPTIONS] + +OPTIONS: + -v value (logging) Number for the log level verbosity (default: 0) + --vmodule value (logging) Comma-separated list of pattern=N settings for file-filtered logging + --log value, -l value (logging) Log to file + --alsologtostderr (logging) Log to standard error as well as file (if set) + --token value, -t value (cluster) Token to use for authentication [$K3S_TOKEN] + --token-file value (cluster) Token file to use for authentication [$K3S_TOKEN_FILE] + --server value, -s value (cluster) Server to connect to [$K3S_URL] + --data-dir value, -d value (agent/data) Folder to hold state (default: "/var/lib/rancher/k3s") + --node-name value (agent/node) Node name [$K3S_NODE_NAME] + --with-node-id (agent/node) Append id to node name + --node-label value (agent/node) Registering kubelet with set of labels + --node-taint value (agent/node) Registering kubelet with set of taints + --docker (agent/runtime) Use docker instead of containerd + --container-runtime-endpoint value (agent/runtime) Disable embedded containerd and use alternative CRI implementation + --pause-image value (agent/runtime) Customized pause image for containerd sandbox + --private-registry value (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml") + --node-ip value, -i value (agent/networking) IP address to advertise for node + --node-external-ip value (agent/networking) External IP address to advertise for node + --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] + --flannel-iface value (agent/networking) Override default flannel interface + --flannel-conf 
value (agent/networking) Override default flannel config file + --kubelet-arg value (agent/flags) Customized flag for kubelet process + --kube-proxy-arg value (agent/flags) Customized flag for kube-proxy process + --rootless (experimental) Run rootless + --no-flannel (deprecated) use --flannel-backend=none + --cluster-secret value (deprecated) use --token [$K3S_CLUSTER_SECRET] +``` + +### Node Labels and Taints for Agents + +K3s agents can be configured with the options `--node-label` and `--node-taint` which adds a label and taint to the kubelet. The two options only add labels and/or taints at registration time, so they can only be added once and not changed after that again by running K3s commands. + +Below is an example showing how to add labels and a taint: +``` + --node-label foo=bar \ + --node-label hello=world \ + --node-taint key1=value1:NoExecute +``` + +If you want to change node labels and taints after node registration you should use `kubectl`. Refer to the official Kubernetes documentation for details on how to add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) and [node labels.](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node) \ No newline at end of file diff --git a/content/k3s/latest/en/installation/kube-dashboard/_index.md b/content/k3s/latest/en/installation/kube-dashboard/_index.md new file mode 100644 index 00000000000..ace9931b2e9 --- /dev/null +++ b/content/k3s/latest/en/installation/kube-dashboard/_index.md @@ -0,0 +1,88 @@ +--- +title: "Kubernetes Dashboard" +weight: 60 +--- + +This installation guide will help you to deploy and configure the [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) on K3s. + +### Deploying the Kubernetes Dashboard + +```bash +sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc5/aio/deploy/recommended.yaml +``` + +### Dashboard RBAC Configuration + +> **Important:** The `admin-user` created in this guide will have administrative privileges in the Dashboard. + +Create the following resource manifest files: + +`dashboard.admin-user.yml` +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin-user + namespace: kubernetes-dashboard +``` + +`dashboard.admin-user-role.yml` +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: admin-user +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: admin-user + namespace: kubernetes-dashboard +``` + +Deploy the `admin-user` configuration: + +```bash +sudo k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml +``` + +### Obtain the Bearer Token + +```bash +sudo k3s kubectl -n kubernetes-dashboard describe secret admin-user-token | grep ^token +``` + +### Local Access to the Dashboard + +To access the Dashboard you must create a secure channel to your K3s cluster: + +```bash +sudo k3s kubectl proxy +``` + +The Dashboard is now accessible at: + +* http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ +* `Sign In` with the `admin-user` Bearer Token + +#### Advanced: Remote Access to the Dashboard + +Please see: Using [Port Forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to Access Applications in a Cluster. 
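+As a rough sketch of that approach (assuming the service name and ports from the `recommended.yaml` deployment used in this guide), you could forward the Dashboard service and bind it to all interfaces on the node:
+
+```bash
+sudo k3s kubectl -n kubernetes-dashboard port-forward --address 0.0.0.0 service/kubernetes-dashboard 8443:443
+```
+
+The Dashboard should then be reachable at `https://<node-ip>:8443`. Only do this on a trusted network, since it exposes the Dashboard beyond localhost.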
+ +### Upgrading the Dashboard + +The latest Dashboard releases are available from: https://github.com/kubernetes/dashboard/releases/latest + +```bash +sudo k3s kubectl delete ns kubernetes-dashboard +sudo k3s kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/[...] +``` + +### Deleting the Dashboard and admin-user configuration + +```bash +sudo k3s kubectl delete -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc5/aio/deploy/recommended.yaml +sudo k3s kubectl delete -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml +``` diff --git a/content/k3s/latest/en/installation/network-options/_index.md b/content/k3s/latest/en/installation/network-options/_index.md new file mode 100644 index 00000000000..c87b2783831 --- /dev/null +++ b/content/k3s/latest/en/installation/network-options/_index.md @@ -0,0 +1,71 @@ +--- +title: "Network Options" +weight: 25 +--- + +> **Note:** Please reference the [Networking]({{< baseurl >}}/k3s/latest/en/networking) page for information about CoreDNS, Traefik, and the Service LB. + +By default, K3s will run with flannel as the CNI, using VXLAN as the default backend. To change the CNI, refer to the section on configuring a [custom CNI](#custom-cni). To change the flannel backend, refer to the flannel options section. + +### Flannel Options + +The default backend for flannel is VXLAN. To enable encryption, pass the IPSec (Internet Protocol Security) or WireGuard options below. + +If you wish to use WireGuard as your flannel backend it may require additional kernel modules. Please see the [WireGuard Install Guide](https://www.wireguard.com/install/) for details. The WireGuard install steps will ensure the appropriate kernel modules are installed for your operating system. You need to install WireGuard on every node, both server and agents before attempting to leverage the WireGuard flannel backend option. + + CLI Flag and Value | Description + -------------------|------------ + `--flannel-backend=vxlan` | (Default) Uses the VXLAN backend. | + `--flannel-backend=ipsec` | Uses the IPSEC backend which encrypts network traffic. | + `--flannel-backend=host-gw` | Uses the host-gw backend. | + `--flannel-backend=wireguard` | Uses the WireGuard backend which encrypts network traffic. May require additional kernel modules and configuration. | + +### Custom CNI + +Run K3s with `--flannel-backend=none` and install your CNI of choice. IP Forwarding should be enabled for Canal and Calico. Please reference the steps below. + +{{% tabs %}} +{{% tab "Canal" %}} + +Visit the [Project Calico Docs](https://docs.projectcalico.org/) website. Follow the steps to install Canal. Modify the Canal YAML so that IP forwarding is allowed in the container_settings section, for example: + +``` +"container_settings": { + "allow_ip_forwarding": true + } +``` + +Apply the Canal YAML. + +Ensure the settings were applied by running the following command on the host: + +``` +cat /etc/cni/net.d/10-canal.conflist +``` + +You should see that IP forwarding is set to true. + +{{% /tab %}} +{{% tab "Calico" %}} + +Follow the [Calico CNI Plugins Guide](https://docs.projectcalico.org/master/reference/cni-plugin/configuration). Modify the Calico YAML so that IP forwarding is allowed in the container_settings section, for example: + +``` +"container_settings": { + "allow_ip_forwarding": true + } +``` + +Apply the Calico YAML. 
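+For example, assuming you saved the modified manifest locally as `calico.yaml`:
+
+```
+kubectl apply -f calico.yaml
+```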
+ +Ensure the settings were applied by running the following command on the host: + +``` +cat /etc/cni/net.d/10-calico.conflist +``` + +You should see that IP forwarding is set to true. + + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/k3s/latest/en/installation/node-requirements/_index.md b/content/k3s/latest/en/installation/node-requirements/_index.md new file mode 100644 index 00000000000..6373503c7da --- /dev/null +++ b/content/k3s/latest/en/installation/node-requirements/_index.md @@ -0,0 +1,40 @@ +--- +title: Node Requirements +weight: 1 +--- + +K3s is very lightweight, but has some minimum requirements as outlined below. + +Whether you're configuring a K3s cluster to run in a Docker or Kubernetes setup, each node running K3s should meet the following minimum requirements. You may need more resources to fit your needs. + +## Prerequisites +* Two nodes cannot have the same hostname. If all your nodes have the same hostname, pass `--node-name` or set `$K3S_NODE_NAME` with a unique name for each node you add to the cluster. + +## Operating Systems + +K3s should run on just about any flavor of Linux. However, K3s is tested on the following operating systems and their subsequent non-major releases. + +* Ubuntu 16.04 (amd64) +* Ubuntu 18.04 (amd64) +* Raspbian Buster (armhf) + +> If you are using Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. + +## Hardware + +Hardware requirements scale based on the size of your deployments. Minimum recommendations are outlined here. + +* RAM: 512MB Minimum +* CPU: 1 Minimum + +#### Disks + +K3s performance depends on the performance of the database. To ensure optimal speed, we recommend using an SSD when possible. Disk performance will vary on ARM devices utilizing an SD card or eMMC. + +## Networking + +The K3s server needs port 6443 to be accessible by the nodes. The nodes need to be able to reach other nodes over UDP port 8472 (Flannel VXLAN). If you do not use flannel and provide your own custom CNI, then port 8472 is not needed by K3s. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. + +IMPORTANT: The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disabled access to port 8472. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. diff --git a/content/k3s/latest/en/installation/private-registry/_index.md b/content/k3s/latest/en/installation/private-registry/_index.md new file mode 100644 index 00000000000..6160866e13a --- /dev/null +++ b/content/k3s/latest/en/installation/private-registry/_index.md @@ -0,0 +1,129 @@ +--- +title: "Private Registry Configuration" +weight: 55 +--- +_Available as of v1.0.0_ + +Containerd can be configured to connect to private registries and use them to pull private images on the node. + +Upon startup, K3s will check to see if a `registries.yaml` file exists at `/etc/rancher/k3s/` and instruct containerd to use any registries defined in the file. If you wish to use a private registry, then you will need to create this file as root on each node that will be using the registry. + +Note that server nodes are schedulable by default. 
If you have not tainted the server nodes and will be running workloads on them, please ensure you also create the `registries.yaml` file on each server as well. + +Configuration in containerd can be used to connect to a private registry with a TLS connection and with registries that enable authentication as well. The following section will explain the `registries.yaml` file and give different examples of using private registry configuration in K3s. + +# Registries Configuration File + +The file consists of two main sections: + +- mirrors +- configs + +### Mirrors + +Mirrors is a directive that defines the names and endpoints of the private registries, for example: + +``` +mirrors: + "mycustomreg.com:5000": + endpoint: + - "https://mycustomreg.com:5000" +``` + +Each mirror must have a name and set of endpoints. When pulling an image from a registry, containerd will try these endpoint URLs one by one, and use the first working one. + +### Configs + +The configs section defines the TLS and credential configuration for each mirror. For each mirror you can define `auth` and/or `tls`. The TLS part consists of: + +Directive | Description +----------|------------ +`cert_file` | The client certificate path that will be used to authenticate with the registry +`key_file` | The client key path that will be used to authenticate with the registry +`ca_file` | Defines the CA certificate path to be used to verify the registry's server cert file + +The credentials consist of either username/password or authentication token: + +- username: user name of the private registry basic auth +- password: user password of the private registry basic auth +- auth: authentication token of the private registry basic auth + +Below are basic examples of using private registries in different modes: + +### With TLS + +Below are examples showing how you may configure `/etc/rancher/k3s/registries.yaml` on each node when using TLS. + +{{% tabs %}} +{{% tab "With Authentication" %}} + +``` +mirrors: + "mycustomreg.com:5000": + endpoint: + - "https://mycustomreg.com:5000" +configs: + "mycustomreg:5000": + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: # path to the cert file used in the registry + key_file: # path to the key file used in the registry + ca_file: # path to the ca file used in the registry +``` + +{{% /tab %}} +{{% tab "Without Authentication" %}} + +``` +mirrors: + "mycustomreg.com:5000": + endpoint: + - "https://mycustomreg.com:5000" +configs: + "mycustomreg:5000": + tls: + cert_file: # path to the cert file used in the registry + key_file: # path to the key file used in the registry + ca_file: # path to the ca file used in the registry +``` + +{{% /tab %}} +{{% /tabs %}} + +### Without TLS + +Below are examples showing how you may configure `/etc/rancher/k3s/registries.yaml` on each node when _not_ using TLS. + +{{% tabs %}} +{{% tab "With Authentication" %}} + +``` +mirrors: + "mycustomreg.com:5000": + endpoint: + - "http://mycustomreg.com:5000" +configs: + "mycustomreg:5000": + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password +``` + +{{% /tab %}} +{{% tab "Without Authentication" %}} + +``` +mirrors: + "mycustomreg.com:5000": + endpoint: + - "http://mycustomreg.com:5000" +``` + +{{% /tab %}} +{{% /tabs %}} + +> In case of no TLS communication, you need to specify `http://` for the endpoints, otherwise it will default to https. 
+ +In order for the registry changes to take effect, you need to restart K3s on each node. diff --git a/content/k3s/latest/en/installation/uninstall/_index.md b/content/k3s/latest/en/installation/uninstall/_index.md new file mode 100644 index 00000000000..43f51fdeb9c --- /dev/null +++ b/content/k3s/latest/en/installation/uninstall/_index.md @@ -0,0 +1,18 @@ +--- +title: Uninstalling K3s +weight: 61 +--- + +If you installed K3s using the installation script, a script to uninstall K3s was generated during installation. + +To uninstall K3s from a server node, run: + +``` +/usr/local/bin/k3s-uninstall.sh +``` + +To uninstall K3s from an agent node, run: + +``` +/usr/local/bin/k3s-agent-uninstall.sh +``` \ No newline at end of file diff --git a/content/k3s/latest/en/known-issues/_index.md b/content/k3s/latest/en/known-issues/_index.md new file mode 100644 index 00000000000..8107e8a7451 --- /dev/null +++ b/content/k3s/latest/en/known-issues/_index.md @@ -0,0 +1,17 @@ +--- +title: Known Issues +weight: 70 +--- +The Known Issues are updated periodically and designed to inform you about any issues that may not be immediately addressed in the next upcoming release. + +**Snap Docker** + +If you plan to use K3s with docker, Docker installed via a snap package is not recommended as it has been known to cause issues running K3s. + +**Iptables** + +If you are running iptables in nftables mode instead of legacy you might encounter issues. We recommend utilizing newer iptables (such as 1.6.1+) to avoid issues. + +**RootlessKit** + +Running K3s with RootlessKit is experimental and has several [known issues.]({{}}/k3s/latest/en/advanced/#known-issues-with-rootlesskit) diff --git a/content/k3s/latest/en/networking/_index.md b/content/k3s/latest/en/networking/_index.md new file mode 100644 index 00000000000..d4f780d8dc5 --- /dev/null +++ b/content/k3s/latest/en/networking/_index.md @@ -0,0 +1,37 @@ +--- +title: "Networking" +weight: 35 +--- + +>**Note:** CNI options are covered in detail on the [Installation Network Options]({{< baseurl >}}/k3s/latest/en/installation/network-options/) page. Please reference that page for details on Flannel and the various flannel backend options or how to set up your own CNI. + +Open Ports +---------- +Please reference the [Node Requirements]({{< baseurl >}}/k3s/latest/en/installation/node-requirements/#networking) page for port information. + +CoreDNS +------- + +CoreDNS is deployed on start of the agent. To disable, run each server with the `--no-deploy coredns` option. + +If you don't install CoreDNS, you will need to install a cluster DNS provider yourself. + +Traefik Ingress Controller +-------------------------- + +[Traefik](https://traefik.io/) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. It simplifies networking complexity while designing, deploying, and running applications. + +Traefik is deployed by default when starting the server. For more information see [Auto Deploying Manifests]({{< baseurl >}}/k3s/latest/en/advanced/#auto-deploying-manifests). The default config file is found in `/var/lib/rancher/k3s/server/manifests/traefik.yaml` and any changes made to this file will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. + +The Traefik ingress controller will use ports 80, 443, and 8080 on the host (i.e. these will not be usable for HostPort or NodePort). + +You can tweak traefik to meet your needs by setting options in the traefik.yaml file. 
Refer to the official [Traefik for Helm Configuration Parameters](https://github.com/helm/charts/tree/master/stable/traefik#configuration) readme for more information. + +To disable it, start each server with the `--no-deploy traefik` option. + +Service Load Balancer +--------------------- + +K3s includes a basic service load balancer that uses available host ports. If you try to create a load balancer that listens on port 80, for example, it will try to find a free host in the cluster for port 80. If no port is available, the load balancer will stay in Pending. + +To disable the embedded load balancer, run the server with the `--no-deploy servicelb` option. This is necessary if you wish to run a different load balancer, such as MetalLB. \ No newline at end of file diff --git a/content/k3s/latest/en/quick-start/_index.md b/content/k3s/latest/en/quick-start/_index.md index e24d414c2c1..8ec057e36a0 100644 --- a/content/k3s/latest/en/quick-start/_index.md +++ b/content/k3s/latest/en/quick-start/_index.md @@ -1,44 +1,32 @@ --- -title: "Quick-Start" -weight: 1 +title: "Quick-Start Guide" +weight: 10 --- -There are many ways to run k3s, we cover a couple easy ways to get started in this section. -The [installation options](../installation) section will cover in greater detail how k3s can be setup. +This guide will help you quickly launch a cluster with default options. The [installation section](../installation) covers in greater detail how K3s can be set up. + +For information on how K3s components work together, refer to the [architecture section.]({{}}/k3s/latest/en/architecture/#high-availability-with-an-external-db) + +> New to Kubernetes? The official Kubernetes docs already have some great tutorials outlining the basics [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/). Install Script -------------- -The k3s `install.sh` script provides a convenient way for installing to systemd or openrc, -to install k3s as a service just run: +K3s provides an installation script that is a convenient way to install it as a service on systemd or openrc based systems. This script is available at https://get.k3s.io. To install K3s using this method, just run: ```bash curl -sfL https://get.k3s.io | sh - ``` -A kubeconfig file is written to `/etc/rancher/k3s/k3s.yaml` and the service is automatically started or restarted. -The install script will install k3s and additional utilities, such as `kubectl`, `crictl`, `k3s-killall.sh`, and `k3s-uninstall.sh`, for example: +After running this installation: + +* The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed +* Additional utilities will be installed, including `kubectl`, `crictl`, `ctr`, `k3s-killall.sh`, and `k3s-uninstall.sh` +* A [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file will be written to `/etc/rancher/k3s/k3s.yaml` and the kubectl installed by K3s will automatically use it + +To install on worker nodes and add them to the cluster, run the installation script with the `K3S_URL` and `K3S_TOKEN` environment variables. Here is an example showing how to join a worker node: ```bash -sudo kubectl get nodes +curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh - ``` +Setting the `K3S_URL` parameter causes K3s to run in worker mode. The K3s agent will register with the K3s server listening at the supplied URL. 
The value to use for `K3S_TOKEN` is stored at `/var/lib/rancher/k3s/server/node-token` on your server node. -`K3S_TOKEN` is created at `/var/lib/rancher/k3s/server/node-token` on your server. -To install on worker nodes we should pass `K3S_URL` along with -`K3S_TOKEN` or `K3S_CLUSTER_SECRET` environment variables, for example: -```bash -curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=XXX sh - -``` - -Manual Download ---------------- -1. Download `k3s` from latest [release](https://github.com/rancher/k3s/releases/latest), x86_64, armhf, and arm64 are supported. -2. Run server. - -```bash -sudo k3s server & -# Kubeconfig is written to /etc/rancher/k3s/k3s.yaml -sudo k3s kubectl get nodes - -# On a different node run the below. NODE_TOKEN comes from -# /var/lib/rancher/k3s/server/node-token on your server -sudo k3s agent --server https://myserver:6443 --token ${NODE_TOKEN} -``` +Note: Each machine must have a unique hostname. If your machines do not have unique hostnames, pass the `K3S_NODE_NAME` environment variable and provide a value with a valid and unique hostname for each node. diff --git a/content/k3s/latest/en/running/_index.md b/content/k3s/latest/en/running/_index.md deleted file mode 100644 index 0782e165a14..00000000000 --- a/content/k3s/latest/en/running/_index.md +++ /dev/null @@ -1,253 +0,0 @@ ---- -title: "Running K3S" -weight: 3 ---- - -This section contains information for running k3s in various environments. - -Starting the Server ------------------- - -The installation script will auto-detect if your OS is using systemd or openrc and start the service. -When running with openrc logs will be created at `/var/log/k3s.log`, or with systemd in `/var/log/syslog` and viewed using `journalctl -u k3s`. An example of installing and auto-starting with the install script: - -```bash -curl -sfL https://get.k3s.io | sh - -``` - -When running the server manually you should get an output similar to: - -``` -$ k3s server -INFO[2019-01-22T15:16:19.908493986-07:00] Starting k3s dev -INFO[2019-01-22T15:16:19.908934479-07:00] Running kube-apiserver --allow-privileged=true --authorization-mode Node,RBAC --service-account-signing-key-file /var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range 10.43.0.0/16 --advertise-port 6445 --advertise-address 127.0.0.1 --insecure-port 0 --secure-port 6444 --bind-address 127.0.0.1 --tls-cert-file /var/lib/rancher/k3s/server/tls/localhost.crt --tls-private-key-file /var/lib/rancher/k3s/server/tls/localhost.key --service-account-key-file /var/lib/rancher/k3s/server/tls/service.key --service-account-issuer k3s --api-audiences unknown --basic-auth-file /var/lib/rancher/k3s/server/cred/passwd --kubelet-client-certificate /var/lib/rancher/k3s/server/tls/token-node.crt --kubelet-client-key /var/lib/rancher/k3s/server/tls/token-node.key -Flag --insecure-port has been deprecated, This flag will be removed in a future version. 
-INFO[2019-01-22T15:16:20.196766005-07:00] Running kube-scheduler --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --port 0 --secure-port 0 --leader-elect=false -INFO[2019-01-22T15:16:20.196880841-07:00] Running kube-controller-manager --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --service-account-private-key-file /var/lib/rancher/k3s/server/tls/service.key --allocate-node-cidrs --cluster-cidr 10.42.0.0/16 --root-ca-file /var/lib/rancher/k3s/server/tls/token-ca.crt --port 0 --secure-port 0 --leader-elect=false -Flag --port has been deprecated, see --secure-port instead. -INFO[2019-01-22T15:16:20.273441984-07:00] Listening on :6443 -INFO[2019-01-22T15:16:20.278383446-07:00] Writing manifest: /var/lib/rancher/k3s/server/manifests/coredns.yaml -INFO[2019-01-22T15:16:20.474454524-07:00] Node token is available at /var/lib/rancher/k3s/server/node-token -INFO[2019-01-22T15:16:20.474471391-07:00] To join node to cluster: k3s agent -s https://10.20.0.3:6443 -t ${NODE_TOKEN} -INFO[2019-01-22T15:16:20.541027133-07:00] Wrote kubeconfig /etc/rancher/k3s/k3s.yaml -INFO[2019-01-22T15:16:20.541049100-07:00] Run: k3s kubectl -``` - -The output will likely be much longer as the agent will create a lot of logs. By default the server -will register itself as a node (run the agent). - -It is common and almost required these days that the control plane be part of the cluster. -To disable the agent when running the server use the `--disable-agent` flag, the agent can then be run as a separate process. - -Joining Nodes -------------- - -When the server starts it creates a file `/var/lib/rancher/k3s/server/node-token`. -Using the contents of that file as `K3S_TOKEN` and setting `K3S_URL` allows the node -to join as an agent using the install script: - - curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=XXX sh - - -When using the install script openrc logs will be created at `/var/log/k3s-agent.log`, or with systemd in `/var/log/syslog` and viewed using `journalctl -u k3s-agent`. 
- -Or running k3s manually with the token as `NODE_TOKEN`: - - k3s agent --server https://myserver:6443 --token ${NODE_TOKEN} - -SystemD -------- - -If you are using systemd here is a sample unit `k3s.service`: - -```ini -[Unit] -Description=Lightweight Kubernetes -Documentation=https://k3s.io -After=network-online.target - -[Service] -Type=notify -EnvironmentFile=/etc/systemd/system/k3s.service.env -ExecStart=/usr/local/bin/k3s server -KillMode=process -Delegate=yes -LimitNOFILE=infinity -LimitNPROC=infinity -LimitCORE=infinity -TasksMax=infinity -TimeoutStartSec=0 -Restart=always -RestartSec=5s - -[Install] -WantedBy=multi-user.target -``` - -OpenRC ------- - -And an example openrc `/etc/init.d/k3s`: - -```bash -#!/sbin/openrc-run - -depend() { - after net-online - need net -} - -start_pre() { - rm -f /tmp/k3s.* -} - -supervisor=supervise-daemon -name="k3s" -command="/usr/local/bin/k3s" -command_args="server >>/var/log/k3s.log 2>&1" - -pidfile="/var/run/k3s.pid" -respawn_delay=5 - -set -o allexport -if [ -f /etc/environment ]; then source /etc/environment; fi -if [ -f /etc/rancher/k3s/k3s.env ]; then source /etc/rancher/k3s/k3s.env; fi -set +o allexport -``` - -Alpine Linux ------------- - -In order to pre-setup Alpine Linux you have to go through the following steps: - -```bash -echo "cgroup /sys/fs/cgroup cgroup defaults 0 0" >> /etc/fstab - -cat >> /etc/cgconfig.conf < 11s v1.13.2-k3s2 - d54c8b17c055 Ready 11s v1.13.2-k3s2 - db7a5a5a5bdd Ready 12s v1.13.2-k3s2 - -To run the agent only in Docker, use `docker-compose up node`. Alternatively the Docker run command can also be used; - - sudo docker run \ - -d --tmpfs /run \ - --tmpfs /var/run \ - -e K3S_URL=${SERVER_URL} \ - -e K3S_TOKEN=${NODE_TOKEN} \ - --privileged rancher/k3s:vX.Y.Z - -Air-Gap Support ---------------- - -k3s supports pre-loading of containerd images by placing them in the `images` directory for the agent before starting, for example: -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` -Images needed for a base install are provided through the releases page, additional images can be created with the `docker save` command. - -Offline Helm charts are served from the `/var/lib/rancher/k3s/server/static` directory, and Helm chart manifests may reference the static files with a `%{KUBERNETES_API}%` templated variable. For example, the default traefik manifest chart installs from `https://%{KUBERNETES_API}%/static/charts/traefik-X.Y.Z.tgz`. - -If networking is completely disabled k3s may not be able to start (ie ethernet unplugged or wifi disconnected), in which case it may be necessary to add a default route. For example: -```sh -sudo ip -c address add 192.168.123.123/24 dev eno1 -sudo ip route add default via 192.168.123.1 -``` - -k3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -Upgrades --------- - -To upgrade k3s from an older version you can re-run the installation script using the same flags, for example: - -```sh -curl -sfL https://get.k3s.io | sh - -``` - -If you want to upgrade to specific version you can run the following command: - -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - -``` - -Or to manually upgrade k3s: - -1. Download the desired version of k3s from [releases](https://github.com/rancher/k3s/releases/latest) -2. Install to an appropriate location (normally `/usr/local/bin/k3s`) -3. Stop the old version -4. 
Start the new version - -Restarting k3s is supported by the installation script for systemd and openrc. -To restart manually for systemd use: -```sh -sudo systemctl restart k3s -``` - -To restart manually for openrc use: -```sh -sudo service k3s restart -``` - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. Download air-gap images and install if changed -2. Install new k3s binary (from installer or manual download) -3. Restart k3s (if not restarted automatically by installer) - -Uninstalling ------------- - -If you installed k3s with the help of `install.sh` script an uninstall script is generated during installation, which will be created on your server node at `/usr/local/bin/k3s-uninstall.sh` (or as `k3s-agent-uninstall.sh`). - -Hyperkube ---------- - -k3s is bundled in a nice wrapper to remove the majority of the headache of running k8s. If -you don't want that wrapper and just want a smaller k8s distro, the releases includes -the `hyperkube` binary you can use. It's then up to you to know how to use `hyperkube`. If -you want individual binaries you will need to compile them yourself from source. diff --git a/content/k3s/latest/en/storage/_index.md b/content/k3s/latest/en/storage/_index.md new file mode 100644 index 00000000000..790cbd04e38 --- /dev/null +++ b/content/k3s/latest/en/storage/_index.md @@ -0,0 +1,152 @@ +--- +title: "Volumes and Storage" +weight: 30 +--- + +When deploying an application that needs to retain data, you’ll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application’s pod fails. + +A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. For details on how PVs and PVCs work, refer to the official Kubernetes documentation on [storage.](https://kubernetes.io/docs/concepts/storage/volumes/) + +This page describes how to set up persistent storage with a local storage provider, or with [Longhorn.](#setting-up-longhorn) + +# Setting up the Local Storage Provider +K3s comes with Rancher's Local Path Provisioner and this enables the ability to create persistent volume claims out of the box using local storage on the respective node. Below we cover a simple example. For more information please reference the official documentation [here](https://github.com/rancher/local-path-provisioner/blob/master/README.md#usage). + +Create a hostPath backed persistent volume claim and a pod to utilize it: + +### pvc.yaml + +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: local-path-pvc + namespace: default +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-path + resources: + requests: + storage: 2Gi +``` + +### pod.yaml + +``` +apiVersion: v1 +kind: Pod +metadata: + name: volume-test + namespace: default +spec: + containers: + - name: volume-test + image: nginx:stable-alpine + imagePullPolicy: IfNotPresent + volumeMounts: + - name: volv + mountPath: /data + ports: + - containerPort: 80 + volumes: + - name: volv + persistentVolumeClaim: + claimName: local-path-pvc +``` + +Apply the yaml: + +``` +kubectl create -f pvc.yaml +kubectl create -f pod.yaml +``` + +Confirm the PV and PVC are created: + +``` +kubectl get pv +kubectl get pvc +``` + +The status should be Bound for each. 
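
As a quick sanity check that the local-path volume actually persists data, you can write a file into the mounted path and confirm it survives a pod restart. This is only an illustrative sketch using the `volume-test` pod and `/data` mount from the example above (on a multi-node cluster the local-path volume is node-local, so the recreated pod must land on the same node; on a single-node K3s install this is automatic):

```
# Write a marker file into the persistent volume
kubectl exec volume-test -- sh -c "echo local-path-test > /data/test"

# Delete and recreate the pod; the PVC and its data are retained
kubectl delete pod volume-test
kubectl create -f pod.yaml

# The marker file should still be present in the new pod
kubectl exec volume-test -- cat /data/test
```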
+ +# Setting up Longhorn + +[comment]: <> (pending change - longhorn may support arm64 and armhf in the future.) + +> **Note:** At this time Longhorn only supports amd64. + +K3s supports [Longhorn](https://github.com/longhorn/longhorn). Longhorn is an open-source distributed block storage system for Kubernetes. + +Below we cover a simple example. For more information, refer to the official documentation [here](https://github.com/longhorn/longhorn/blob/master/README.md). + +Apply the longhorn.yaml to install Longhorn: + +``` +kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/longhorn.yaml +``` + +Longhorn will be installed in the namespace `longhorn-system`. + +Before we create a PVC, we will create a storage class for Longhorn with this yaml: + +``` +kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/examples/storageclass.yaml +``` + +Apply the yaml to create the PVC and pod: + +``` +kubectl create -f pvc.yaml +kubectl create -f pod.yaml +``` + +### pvc.yaml + +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: longhorn-volv-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn + resources: + requests: + storage: 2Gi +``` + +### pod.yaml + +``` +apiVersion: v1 +kind: Pod +metadata: + name: volume-test + namespace: default +spec: + containers: + - name: volume-test + image: nginx:stable-alpine + imagePullPolicy: IfNotPresent + volumeMounts: + - name: volv + mountPath: /data + ports: + - containerPort: 80 + volumes: + - name: volv + persistentVolumeClaim: + claimName: longhorn-volv-pvc +``` + +Confirm the PV and PVC are created: + +``` +kubectl get pv +kubectl get pvc +``` + +The status should be Bound for each. diff --git a/content/k3s/latest/en/upgrades/_index.md b/content/k3s/latest/en/upgrades/_index.md new file mode 100644 index 00000000000..3ce3a0591a3 --- /dev/null +++ b/content/k3s/latest/en/upgrades/_index.md @@ -0,0 +1,44 @@ +--- +title: "Upgrades" +weight: 25 +--- + +You can upgrade K3s by using the installation script, or by manually installing the binary of the desired version. + +>**Note:** When upgrading, upgrade server nodes first one at a time, then any worker nodes. + +### Upgrade K3s Using the Installation Script + +To upgrade K3s from an older version you can re-run the installation script using the same flags, for example: + +```sh +curl -sfL https://get.k3s.io | sh - +``` + +If you want to upgrade to specific version you can run the following command: + +```sh +curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - +``` + +### Manually Upgrade K3s Using the Binary + +Or to manually upgrade K3s: + +1. Download the desired version of K3s from [releases](https://github.com/rancher/k3s/releases/latest) +2. Install to an appropriate location (normally `/usr/local/bin/k3s`) +3. Stop the old version +4. Start the new version + +### Restarting K3s + +Restarting K3s is supported by the installation script for systemd and openrc. 
+To restart manually for systemd use: +```sh +sudo systemctl restart k3s +``` + +To restart manually for openrc use: +```sh +sudo service k3s restart +``` \ No newline at end of file diff --git a/content/os/v1.x/en/_index.md b/content/os/v1.x/en/_index.md index 258d634ed5d..1fd27ba96da 100644 --- a/content/os/v1.x/en/_index.md +++ b/content/os/v1.x/en/_index.md @@ -35,7 +35,7 @@ System Docker runs a special container called **Docker**, which is another Docke We created this separation not only for the security benefits, but also to make sure that commands like `docker rm -f $(docker ps -qa)` don't delete the entire OS. -![How it works]({{< baseurl >}}/img/os/rancheroshowitworks.png) +{{< img "/img/os/rancheroshowitworks.png" "How it works">}} ### Running RancherOS diff --git a/content/os/v1.x/en/about/security/_index.md b/content/os/v1.x/en/about/security/_index.md index 8697f09084a..00286cf1a53 100644 --- a/content/os/v1.x/en/about/security/_index.md +++ b/content/os/v1.x/en/about/security/_index.md @@ -14,7 +14,7 @@ weight: 303

Please submit possible security issues by emailing security@rancher.com

-Announcments
+Announcements

Subscribe to the Rancher announcements forum for release updates.

@@ -33,7 +33,7 @@ weight: 303 | [CVE-2017-5715](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715) | Systems with microprocessors utilizing speculative execution and indirect branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis | 6 Feb 2018 | [RancherOS v1.1.4](https://github.com/rancher/os/releases/tag/v1.1.4) using Linux v4.9.78 with the Retpoline support | | [CVE-2017-5753](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5753) | Systems with microprocessors utilizing speculative execution and branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | | [CVE-2018-8897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-8897) | A statement in the System Programming Guide of the Intel 64 and IA-32 Architectures Software Developer's Manual (SDM) was mishandled in the development of some or all operating-system kernels, resulting in unexpected behavior for #DB exceptions that are deferred by MOV SS or POP SS, as demonstrated by (for example) privilege escalation in Windows, macOS, some Xen configurations, or FreeBSD, or a Linux kernel crash. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | -| [L1 Terminal Fault](https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html) | L1 Terminal Fault is a hardware vulnerability which allows unprivileged speculative access to data which is available in the Level 1 Data Cache when the page table entry controlling the virtual address, which is used for the access, has the Present bit cleared or other reserved bits set. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | +| [CVE-2018-3620](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3620) | L1 Terminal Fault is a hardware vulnerability which allows unprivileged speculative access to data which is available in the Level 1 Data Cache when the page table entry controlling the virtual address, which is used for the access, has the Present bit cleared or other reserved bits set. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | | [CVE-2018-3639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639) | Systems with microprocessors utilizing speculative execution and speculative execution of memory reads before the addresses of all prior memory writes are known may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis, aka Speculative Store Bypass (SSB), Variant 4. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | | [CVE-2018-17182](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17182) | The vmacache_flush_all function in mm/vmacache.c mishandles sequence number overflows. An attacker can trigger a use-after-free (and possibly gain privileges) via certain thread creation, map, unmap, invalidation, and dereference operations. 
| 18 Oct 2018 | [RancherOS v1.4.2](https://github.com/rancher/os/releases/tag/v1.4.2) using Linux v4.14.73 | | [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736) | runc through 1.0-rc6, as used in Docker before 18.09.2 and other products, allows attackers to overwrite the host runc binary (and consequently obtain host root access) by leveraging the ability to execute a command as root within one of these types of containers: (1) a new container with an attacker-controlled image, or (2) an existing container, to which the attacker previously had write access, that can be attached with docker exec. This occurs because of file-descriptor mishandling, related to /proc/self/exe. | 12 Feb 2019 | [RancherOS v1.5.1](https://github.com/rancher/os/releases/tag/v1.5.1) | diff --git a/content/os/v1.x/en/installation/amazon-ecs/_index.md b/content/os/v1.x/en/installation/amazon-ecs/_index.md index fa14096e0d8..a76c7675044 100644 --- a/content/os/v1.x/en/installation/amazon-ecs/_index.md +++ b/content/os/v1.x/en/installation/amazon-ecs/_index.md @@ -58,25 +58,25 @@ rancher: ### Amazon ECS enabled AMIs -Latest Release: [v1.5.3](https://github.com/rancher/os/releases/tag/v1.5.3) +Latest Release: [v1.5.4](https://github.com/rancher/os/releases/tag/v1.5.4) Region | Type | AMI ---|--- | --- -eu-north-1 | HVM - ECS enabled | [ami-02042aefd9a6743c0](https://eu-north-1.console.aws.amazon.com/ec2/home?region=eu-north-1#launchInstanceWizard:ami=ami-02042aefd9a6743c0) -ap-south-1 | HVM - ECS enabled | [ami-097e19198e915f12c](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-097e19198e915f12c) -eu-west-3 | HVM - ECS enabled | [ami-0622559381120fe22](https://eu-west-3.console.aws.amazon.com/ec2/home?region=eu-west-3#launchInstanceWizard:ami=ami-0622559381120fe22) -eu-west-2 | HVM - ECS enabled | [ami-081d1809e05a29ff9](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-081d1809e05a29ff9) -eu-west-1 | HVM - ECS enabled | [ami-08f19c0126135b103](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-08f19c0126135b103) -ap-northeast-2 | HVM - ECS enabled | [ami-08bba0cd9934cef90](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-08bba0cd9934cef90) -ap-northeast-1 | HVM - ECS enabled | [ami-0a7a9e44ec4c01f7e](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-0a7a9e44ec4c01f7e) -sa-east-1 | HVM - ECS enabled | [ami-0ad4e6bd39fe14dfa](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-0ad4e6bd39fe14dfa) -ca-central-1 | HVM - ECS enabled | [ami-0bde65d7509878a90](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-0bde65d7509878a90) -ap-southeast-1 | HVM - ECS enabled | [ami-085ce6d3cf455dba0](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-085ce6d3cf455dba0) -ap-southeast-2 | HVM - ECS enabled | [ami-004dc02c07766a9a6](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-004dc02c07766a9a6) -eu-central-1 | HVM - ECS enabled | [ami-0fa23b013188bf809](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-0fa23b013188bf809) -us-east-1 | HVM - ECS enabled | 
[ami-0395c86bff9bc1bce](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-0395c86bff9bc1bce) -us-east-2 | HVM - ECS enabled | [ami-02027918438bc6897](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-02027918438bc6897) -us-west-1 | HVM - ECS enabled | [ami-03e54b15c63b99c47](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-03e54b15c63b99c47) -us-west-2 | HVM - ECS enabled | [ami-0a7f51b27f45e8d77](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-0a7f51b27f45e8d77) -cn-north-1 | HVM - ECS enabled | [ami-0dfbc6d88d4048e24](https://cn-north-1.console.amazonaws.cn/ec2/home?region=cn-north-1#launchInstanceWizard:ami=ami-0dfbc6d88d4048e24) -cn-northwest-1 | HVM - ECS enabled | [ami-04d3267529863091d](https://cn-northwest-1.console.amazonaws.cn/ec2/home?region=cn-northwest-1#launchInstanceWizard:ami=ami-04d3267529863091d) +eu-north-1 | HVM - ECS enabled | [ami-0c46c1da6468aa948](https://eu-north-1.console.aws.amazon.com/ec2/home?region=eu-north-1#launchInstanceWizard:ami=ami-0c46c1da6468aa948) +ap-south-1 | HVM - ECS enabled | [ami-097e5fa868c46e925](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-097e5fa868c46e925) +eu-west-3 | HVM - ECS enabled | [ami-016e7d630d7f608e4](https://eu-west-3.console.aws.amazon.com/ec2/home?region=eu-west-3#launchInstanceWizard:ami=ami-016e7d630d7f608e4) +eu-west-2 | HVM - ECS enabled | [ami-00aacd261ab72302e](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-00aacd261ab72302e) +eu-west-1 | HVM - ECS enabled | [ami-0812b3f8aec8d2d81](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-0812b3f8aec8d2d81) +ap-northeast-2 | HVM - ECS enabled | [ami-0d9d77df6579e618a](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-0d9d77df6579e618a) +ap-northeast-1 | HVM - ECS enabled | [ami-09e957ac11ef430a3](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-09e957ac11ef430a3) +sa-east-1 | HVM - ECS enabled | [ami-09c22f3ce89280ed4](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-09c22f3ce89280ed4) +ca-central-1 | HVM - ECS enabled | [ami-016ac80225e649cf9](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-016ac80225e649cf9) +ap-southeast-1 | HVM - ECS enabled | [ami-06cdfc80bdbd6f419](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-06cdfc80bdbd6f419) +ap-southeast-2 | HVM - ECS enabled | [ami-0335f7bb1c51c0a74](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-0335f7bb1c51c0a74) +eu-central-1 | HVM - ECS enabled | [ami-0af71ec7ee8b729be](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-0af71ec7ee8b729be) +us-east-1 | HVM - ECS enabled | [ami-07209d7ec9e7545b4](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-07209d7ec9e7545b4) +us-east-2 | HVM - ECS enabled | [ami-046358fe356dd0e35](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-046358fe356dd0e35) +us-west-1 | HVM - ECS enabled | 
[ami-031bcb65b47cb0a77](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-031bcb65b47cb0a77) +us-west-2 | HVM - ECS enabled | [ami-0d92d296ecb13ea45](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-0d92d296ecb13ea45) +cn-north-1 | HVM - ECS enabled | [ami-04f1668aaf990acf6](https://cn-north-1.console.amazonaws.cn/ec2/home?region=cn-north-1#launchInstanceWizard:ami=ami-04f1668aaf990acf6) +cn-northwest-1 | HVM - ECS enabled | [ami-0771f259ffce58280](https://cn-northwest-1.console.amazonaws.cn/ec2/home?region=cn-northwest-1#launchInstanceWizard:ami=ami-0771f259ffce58280) diff --git a/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md b/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md index 49d4242bb19..cafa5232098 100644 --- a/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md +++ b/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md @@ -49,7 +49,7 @@ On desktop systems the Syslinux boot menu can be switched to graphical mode by a #### Recovery console -`rancher.recovery=true` will start a single user `root` bash session as easily in the boot process, with no network, or persitent filesystem mounted. This can be used to fix disk problems, or to debug your system. +`rancher.recovery=true` will start a single user `root` bash session as easily in the boot process, with no network, or persistent filesystem mounted. This can be used to fix disk problems, or to debug your system. #### Enable/Disable sshd @@ -61,7 +61,7 @@ On desktop systems the Syslinux boot menu can be switched to graphical mode by a #### Autologin console -`rancher.autologin=` will automatically log in the sepcified console - common values are `tty1`, `ttyS0` and `ttyAMA0` - depending on your platform. +`rancher.autologin=` will automatically log in the specified console - common values are `tty1`, `ttyS0` and `ttyAMA0` - depending on your platform. #### Enable/Disable hypervisor service auto-enable diff --git a/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md b/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md new file mode 100644 index 00000000000..13ec156209f --- /dev/null +++ b/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md @@ -0,0 +1,22 @@ +--- +title: Date and time zone +weight: 121 +--- + +The default console keeps time in the Coordinated Universal Time (UTC) zone and synchronizes clocks with the Network Time Protocol (NTP). The Network Time Protocol daemon (ntpd) is an operating system program that maintains the system time in synchronization with time servers using the NTP. + +RancherOS can run ntpd in the System Docker container. You can update its configurations by updating `/etc/ntp.conf`. For an example of how to update a file such as `/etc/ntp.conf` within a container, refer to [this page.]({{< baseurl >}}/os/v1.x/en/installation/configuration/write-files/#writing-files-in-specific-system-services) + +The default console cannot support changing the time zone because including `tzdata` (time zone data) will increase the ISO size. 
However, you can change the time zone in the container by passing a flag to specify the time zone when you run the container: + +``` +$ docker run -e TZ=Europe/Amsterdam debian:jessie date +Tue Aug 20 09:28:19 CEST 2019 +``` + +You may need to install the `tzdata` in some images: + +``` +$ docker run -e TZ=Asia/Shanghai -e DEBIAN_FRONTEND=noninteractive -it --rm ubuntu /bin/bash -c "apt-get update && apt-get install -yq tzdata && date” +Thu Aug 29 08:13:02 CST 2019 +``` diff --git a/content/os/v1.x/en/installation/configuration/docker/_index.md b/content/os/v1.x/en/installation/configuration/docker/_index.md index 0383552f91a..0620f6ecd6d 100644 --- a/content/os/v1.x/en/installation/configuration/docker/_index.md +++ b/content/os/v1.x/en/installation/configuration/docker/_index.md @@ -86,7 +86,7 @@ _Available as of v1.4.x_ The docker0 bridge can be configured with docker args, it will take effect after reboot. ``` -$ ros config set rancher.docker.bip 192.168.100.1/16 +$ ros config set rancher.docker.bip 192.168.0.0/16 ``` ### Configuring System Docker @@ -114,13 +114,13 @@ _Available as of v1.4.x_ The docker-sys bridge can be configured with system-docker args, it will take effect after reboot. ``` -$ ros config set rancher.system_docker.bip 172.18.43.1/16 +$ ros config set rancher.system_docker.bip 172.19.0.0/16 ``` _Available as of v1.4.x_ -The default path of system-docker logs is `/var/log/system-docker.log`. If you want to write the system-docker logs to a separate partition, -e.g. [RANCHE_OEM partition]({{< baseurl >}}/os/v1.x/en/about/custom-partition-layout/#use-rancher-oem-partition), you can try `rancher.defaults.system_docker_logs`: +The default path of system-docker logs is `/var/log/system-docker.log`. If you want to write the system-docker logs to a separate partition, +e.g. [RANCHER_OEM partition]({{< baseurl >}}/os/v1.x/en/about/custom-partition-layout/#use-rancher-oem-partition), you can try `rancher.defaults.system_docker_logs`: ``` #cloud-config @@ -170,11 +170,11 @@ Status: Downloaded newer image for alpine:latest _Available as of v1.5.0_ -When RancherOS is booted, you start with a User Docker service that is running in System Docker. With v1.5.0, RancherOS has the ability to create additional User Docker services that can run at the same time. +When RancherOS is booted, you start with a User Docker service that is running in System Docker. With v1.5.0, RancherOS has the ability to create additional User Docker services that can run at the same time. #### Terminology -Throughout the rest of this documentation, we may simplify to use these terms when describing Docker. +Throughout the rest of this documentation, we may simplify to use these terms when describing Docker. | Terminology | Definition | |-----------------------|--------------------------------------------------| @@ -184,13 +184,13 @@ Throughout the rest of this documentation, we may simplify to use these terms wh #### Pre-Requisites -User Docker must be set as Docker 17.12.1 or earlier. If it's a later Docker version, it will produce errors when creating a user defined network in System Docker. +User Docker must be set as Docker 17.12.1 or earlier. If it's a later Docker version, it will produce errors when creating a user defined network in System Docker. ``` $ ros engine switch docker-17.12.1-ce ``` -You will need to create a user-defined network, which will be used when creating the Other User Docker. +You will need to create a user-defined network, which will be used when creating the Other User Docker. 
``` $ system-docker network create --subnet=172.20.0.0/16 dind @@ -204,7 +204,7 @@ In order to create another User Docker, you will use `ros engine create`. Curren $ ros engine create otheruserdockername --network=dind --fixed-ip=172.20.0.2 ``` -After the Other User Docker service is created, users can query this service like other services. +After the Other User Docker service is created, users can query this service like other services. ``` $ ros service list @@ -215,13 +215,13 @@ disabled volume-nfs enabled otheruserdockername ``` -You can use `ros service up` to start the Other User Docker service. +You can use `ros service up` to start the Other User Docker service. ``` $ ros service up otheruserdockername ``` -After the Other User Docker service is running, you can interact with it just like you can use the built-in User Docker. You would need to append `-` to `docker`. +After the Other User Docker service is running, you can interact with it just like you can use the built-in User Docker. You would need to append `-` to `docker`. ``` $ docker-otheruserdockername ps -a @@ -229,7 +229,7 @@ $ docker-otheruserdockername ps -a #### SSH into the Other User Docker container -When creating the Other User Docker, you can set an external SSH port so you can SSH into the Other User Docker container in System Docker. By using `--ssh-port` and adding ssh keys with `--authorized-keys`, you can set up this optional SSH port. +When creating the Other User Docker, you can set an external SSH port so you can SSH into the Other User Docker container in System Docker. By using `--ssh-port` and adding ssh keys with `--authorized-keys`, you can set up this optional SSH port. ``` $ ros engine create --help @@ -248,7 +248,7 @@ When using `--authorized-keys`, you will need to put the key file in one of the /home/ ``` -RancherOS will generate a random password for each Other User Docker container, which can be viewed in the container logs. If you do not set any SSH keys, the password can be used. +RancherOS will generate a random password for each Other User Docker container, which can be viewed in the container logs. If you do not set any SSH keys, the password can be used. ``` $ system-docker logs otheruserdockername @@ -259,7 +259,7 @@ password: xCrw6fEG ====================================== ``` -In System Docker, you can SSH into any Other Uesr Docker Container using `ssh`. +In System Docker, you can SSH into any Other User Docker Container using `ssh`. ``` $ system-docker ps @@ -274,7 +274,7 @@ $ ssh root@ #### Removing any Other User Docker Service -We recommend using `ros engine rm` to remove any Other User Docker service. +We recommend using `ros engine rm` to remove any Other User Docker service. ``` $ ros engine rm otheruserdockername diff --git a/content/os/v1.x/en/installation/configuration/running-commands/_index.md b/content/os/v1.x/en/installation/configuration/running-commands/_index.md index 01bc8047343..11b8d44d8be 100644 --- a/content/os/v1.x/en/installation/configuration/running-commands/_index.md +++ b/content/os/v1.x/en/installation/configuration/running-commands/_index.md @@ -12,7 +12,7 @@ runcmd: - echo "test" > /home/rancher/test2 ``` -Commands specified using `runcmd` will be executed within the context of the `console` container. More details on the ordering of commands run in the `console` container can be found [here]({{< baseurl >}}/os/v1.x/en/installation/boot-process/built-in-system-services/#console). 
+Commands specified using `runcmd` will be executed within the context of the `console` container. ### Running Docker commands diff --git a/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md b/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md index 65fe0ad175a..e51d1d46405 100644 --- a/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md +++ b/content/os/v1.x/en/installation/configuration/switching-docker-versions/_index.md @@ -83,7 +83,7 @@ FROM scratch COPY engine /engine ``` -Once the image is built a [system service]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/) configuration file must be created. An [example file](https://github.com/rancher/os-services/blob/master/d/docker-1.12.3.yml) can be found in the rancher/os-services repo. Change the `image` field to point to the Docker engine image you've built. +Once the image is built a [system service]({{< baseurl >}}/os/v1.x/en/installation/system-services/adding-system-services/) configuration file must be created. An [example file](https://github.com/rancher/os-services/blob/master/d/docker-18.06.3-ce.yml) can be found in the rancher/os-services repo. Change the `image` field to point to the Docker engine image you've built. All of the previously mentioned methods of switching Docker engines are now available. For example, if your service file is located at `https://myservicefile` then the following cloud-config file could be used to use your custom Docker engine. diff --git a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md index 68f8389866e..697189f8d9d 100644 --- a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md +++ b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md @@ -64,7 +64,7 @@ $ USER_DOCKER_VERSION=17.03.2 make release _Available as of v1.5.0_ -When building RancherOS, you have the ability to automatically start in a supported [console]({{< baseurl >}}/os/v1.x/en/installation/switching-consoles/) instead of booting into the default console and switching to your desired one. +When building RancherOS, you have the ability to automatically start in a supported console instead of booting into the default console and switching to your desired one. Here is an example of building RancherOS and having the `alpine` console enabled: diff --git a/content/os/v1.x/en/installation/networking/interfaces/_index.md b/content/os/v1.x/en/installation/networking/interfaces/_index.md index f33ac9f7de9..f93384e4e53 100644 --- a/content/os/v1.x/en/installation/networking/interfaces/_index.md +++ b/content/os/v1.x/en/installation/networking/interfaces/_index.md @@ -232,7 +232,7 @@ rancher: scan_ssid: 1 ``` -When adding in WiFi access, you do not need a system reboot, you only need to restart the `network` service in System Docker. +When adding in WiFi access, you do not need a system reboot, you only need to restart the `network` service in System Docker. ``` $ sudo system-docker restart network @@ -244,13 +244,13 @@ $ sudo system-docker restart network _Available as of v1.5_ -In order to support 4G-LTE, 4G-LTE module will need to be connected to the motherboard and to get a good signal, an external atenna will need to be added. 
You can assemble such a device, which supports USB interface and SIM cards slot: +In order to support 4G-LTE, 4G-LTE module will need to be connected to the motherboard and to get a good signal, an external antenna will need to be added. You can assemble such a device, which supports USB interface and SIM cards slot: ![](https://ws1.sinaimg.cn/bmiddle/006tNc79ly1fzcuvhu6zpj30k80qwag1.jpg) -In order to use RancherOS, you will need to use the ISO built for 4G-LTE support. This ISO has a built-in `modem-manager` service and is available with each release. +In order to use RancherOS, you will need to use the ISO built for 4G-LTE support. This ISO has a built-in `modem-manager` service and is available with each release. -After booting the ISO, there will be a 4G NIC, such as `wwan0`. Use the following `cloud-config` to set the APN parameter. +After booting the ISO, there will be a 4G NIC, such as `wwan0`. Use the following `cloud-config` to set the APN parameter. ```yaml rancher: @@ -266,4 +266,4 @@ After any configuration changes, restart the `modem-manager` service to apply th $ sudo system-docker restart modem-manager ``` -> **Note:** Currently, RancherOS has some built-in rules in `udev` rules to allow RancherOS to recognize specific 4G devices, but there are additional vendors that may be missing. If you need to add these in, please file an issue. +> **Note:** Currently, RancherOS has some built-in rules in `udev` rules to allow RancherOS to recognize specific 4G devices, but there are additional vendors that may be missing. If you need to add these in, please file an issue. diff --git a/content/os/v1.x/en/installation/running-rancheros/_index.md b/content/os/v1.x/en/installation/running-rancheros/_index.md index 4c8b2845a1f..c677f71c35e 100644 --- a/content/os/v1.x/en/installation/running-rancheros/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/_index.md @@ -24,6 +24,12 @@ RancherOS runs on virtualization platforms, cloud providers and bare metal serve [Azure]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/azure) +[OpenStack]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/openstack) + +[VMware ESXi]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi) + +[Aliyun]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/aliyun) + #### Bare Metal & Virtual Servers [PXE]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/server/pxe) diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md new file mode 100644 index 00000000000..ce08ce913fb --- /dev/null +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/aliyun/_index.md @@ -0,0 +1,34 @@ +--- +title: Aliyun +weight: 111 +--- + +# Adding the RancherOS Image into Aliyun + +RancherOS is available as an image in Aliyun, and can be easily run in Elastic Compute Service (ECS). Let’s walk through how to upload the ECS image. + +1. Download the most recent RancherOS image. The image `rancheros-aliyun.vhd` can be found in the [release artifacts](https://github.com/rancher/os/releases). +2. Follow Aliyun's instructions on how to [upload the image](https://help.aliyun.com/document_detail/127285.html). Before the image can be added, it must be uploaded into an OSS bucket. +3. Once the image is added to your ECS, we can start creating new instances! 
+ +Example: + +![RancherOS on Aliyun 1]({{< baseurl >}}/img/os/RancherOS_aliyun1.jpg) + +## Options + +| Option | Description | +| --- | --- | +| Root disk size | The size must be greater than 10GB. Note: When booting the instance, the value must be kept the same. | +| Platform | Select `Others Linux` | +| Image Format | Select `VHD` | + +### Launching RancherOS using Aliyun Console + +After the image is uploaded, we can use the `Aliyun Console` to start a new instance. Currently, RancherOS on Aliyun only supports SSH key access, so it can only be deployed through the UI. + +Since the image is private, we need to use the `Custom Images`. + +![RancherOS on Aliyun 2]({{< baseurl >}}/img/os/RancherOS_aliyun2.jpg) + +After the instance is successfully started, we can login with the `rancher` user via SSH. diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md index 969fc387daa..e8886b5f617 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md @@ -25,17 +25,17 @@ Let’s walk through how to import and create a RancherOS on EC2 machine using t 1. First login to your AWS console, and go to the EC2 dashboard, click on **Launch Instance**: - ![RancherOS on AWS 1]({{< baseurl >}}/img/os/Rancher_aws1.png) + {{< img "/img/os/Rancher_aws1.png" "RancherOS on AWS 1">}} 2. Select the **Community AMIs** on the sidebar and search for **RancherOS**. Pick the latest version and click **Select**. - ![RancherOS on AWS 2]({{< baseurl >}}/img/os/Rancher_aws2.png) + {{< img "/img/os/Rancher_aws2.png" "RancherOS on AWS 2">}} 3. Go through the steps of creating the instance type through the AWS console. If you want to pass in a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file during boot of RancherOS, you'd pass in the file as **User data** by expanding the **Advanced Details** in **Step 3: Configure Instance Details**. You can pass in the data as text or as a file. - ![RancherOS on AWS 6]({{< baseurl >}}/img/os/Rancher_aws6.png) + {{< img "/img/os/Rancher_aws6.png" "RancherOS on AWS 6">}} After going through all the steps, you finally click on **Launch**, and either create a new key pair or choose an existing key pair to be used with the EC2 instance. If you have created a new key pair, download the key pair. If you have chosen an existing key pair, make sure you have the key pair accessible. Click on **Launch Instances**. - ![RancherOS on AWS 3]({{< baseurl >}}/img/os/Rancher_aws3.png) + {{< img "/img/os/Rancher_aws3.png" "RancherOS on AWS 3">}} 4. Your instance will be launching and you can click on **View Instances** to see it's status. - ![RancherOS on AWS 4]({{< baseurl >}}/img/os/Rancher_aws4.png) + {{< img "/img/os/Rancher_aws4.png" "RancherOS on AWS 4">}} Your instance is now running! 
- ![RancherOS on AWS 5]({{< baseurl >}}/img/os/Rancher_aws5.png) + {{< img "/img/os/Rancher_aws5.png" "RancherOS on AWS 5">}} ## Logging into RancherOS diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md index 9390b0092f4..d644822ded6 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/do/_index.md @@ -3,7 +3,7 @@ title: Digital Ocean weight: 107 --- -RancherOS is avaliable in the Digital Ocean portal. RancherOS is a member of container distributions and you can find it easily. +RancherOS is available in the Digital Ocean portal. RancherOS is a member of container distributions and you can find it easily. >**Note** >Deploying to Digital Ocean will incur charges. diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/_index.md index 78bb92c18a2..b4ccdb6fa25 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/_index.md @@ -7,7 +7,7 @@ As of v1.1.0, RancherOS automatically detects that it is running on VMware ESXi, As of v1.5.0, RancherOS releases anything required for VMware, which includes initrd, a standard ISO for VMware, a `vmdk` image, and a specific ISO to be used with Docker Machine. The open-vm-tools is built in to RancherOS, there is no need to download it. -| Description | Downlaod URL | +| Description | Download URL | |---|---| | Booting from ISO | https://releases.rancher.com/os/latest/vmware/rancheros.iso | | For docker-machine | https://releases.rancher.com/os/latest/vmware/rancheros-autoformat.iso | diff --git a/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md b/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md index 86c83c842d5..e0deb1b54a4 100644 --- a/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/server/install-to-disk/_index.md @@ -82,6 +82,22 @@ rancher/os:v0.5.0 remote Alternatively, you can set the installer image to any image in System Docker to install RancherOS. This is particularly useful for machines that will not have direct access to the internet. +#### Caching Images + +_Available as of v1.5.3_ + +Some configurations included in `cloud-config` require images to be downloaded from Docker to start. After installation, these images are downloaded automatically by RancherOS when booting. An example of these configurations are: + +- rancher.services_include +- rancher.console +- rancher.docker + +If you want to download and save these images to disk during installation, they will be cached and not need to be downloaded again upon each boot. You can cache these images by adding `-s` when using `ros install`: + +``` +$ ros install -d -c -s +``` + ### SSH into RancherOS After installing RancherOS, you can ssh into RancherOS using your private key and the **rancher** user. 
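
For example, a minimal login might look like the following sketch, assuming the private key pair you supplied during installation; the key path and server address are placeholders, so substitute your own values:

```
$ ssh -i ~/.ssh/your_private_key rancher@<server-ip>
```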
diff --git a/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md b/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md index da9145dae25..ba63929e047 100644 --- a/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md +++ b/content/os/v1.x/en/installation/system-services/custom-system-services/_index.md @@ -165,12 +165,12 @@ Once you have your own Services repository, you can add a new service to its ind To create your own console images, you need to: -1 install some basic tools, including an ssh daemon, sudo, and kernel module tools -2 create `rancher` and `docker` users and groups with UID and GID's of `1100` and `1101` respectively -3 add both users to the `docker` and `sudo` groups -4 add both groups into the `/etc/sudoers` file to allow password-less sudo -5 configure sshd to accept logins from users in the `docker` group, and deny `root`. -6 set `ENTRYPOINT ["/usr/bin/ros", "entrypoint"]` +1. install some basic tools, including an ssh daemon, sudo, and kernel module tools +2. create `rancher` and `docker` users and groups with UID and GID's of `1100` and `1101` respectively +3. add both users to the `docker` and `sudo` groups +4. add both groups into the `/etc/sudoers` file to allow password-less sudo +5. configure sshd to accept logins from users in the `docker` group, and deny `root`. +6. set `ENTRYPOINT ["/usr/bin/ros", "entrypoint"]` the `ros` binary, and other host specific configuration files will be bind mounted into the running console container when its launched. diff --git a/content/os/v1.x/en/installation/system-services/environment/_index.md b/content/os/v1.x/en/installation/system-services/environment/_index.md index a0d9746613c..c3990e318a9 100644 --- a/content/os/v1.x/en/installation/system-services/environment/_index.md +++ b/content/os/v1.x/en/installation/system-services/environment/_index.md @@ -3,7 +3,7 @@ title: Environment weight: 143 --- -The [environment key](https://docs.docker.com/compose/yml/#environment) can be used to customize system services. When a value is not assigned, RancherOS looks up the value from the `rancher.environment` key. +The [environment key](https://docs.docker.com/compose/compose-file/#environment) can be used to customize system services. When a value is not assigned, RancherOS looks up the value from the `rancher.environment` key. In the example below, `ETCD_DISCOVERY` will be set to `https://discovery.etcd.io/d1cd18f5ee1c1e2223aed6a1734719f7` for the `etcd` service. diff --git a/content/os/v1.x/en/overview/_index.md b/content/os/v1.x/en/overview/_index.md index 1258dfe7db9..264f130ef15 100644 --- a/content/os/v1.x/en/overview/_index.md +++ b/content/os/v1.x/en/overview/_index.md @@ -35,7 +35,7 @@ System Docker runs a special container called **Docker**, which is another Docke We created this separation not only for the security benefits, but also to make sure that commands like `docker rm -f $(docker ps -qa)` don't delete the entire OS. 
-![How it works]({{< baseurl >}}/img/os/rancheroshowitworks.png) +{{< img "/img/os/rancheroshowitworks.png" "How it works">}} ### Running RancherOS diff --git a/content/os/v1.x/en/quick-start-guide/_index.md b/content/os/v1.x/en/quick-start-guide/_index.md index 945ef763043..7e01e0fc0a3 100644 --- a/content/os/v1.x/en/quick-start-guide/_index.md +++ b/content/os/v1.x/en/quick-start-guide/_index.md @@ -92,7 +92,7 @@ $ sudo system-docker run -d --net=host --name busydash husseingalal/busydash ``` In the command, we used `--net=host` to tell System Docker not to containerize the container's networking, and use the host’s networking instead. After running the container, you can see the monitoring server by accessing `http://`. -![System Docker Container]({{< baseurl >}}/img/os/Rancher_busydash.png) +{{< img "/img/os/Rancher_busydash.png" "System Docker Container">}} To make the container survive during the reboots, you can create the `/opt/rancher/bin/start.sh` script, and add the Docker start line to launch the Docker at each startup. diff --git a/content/rancher/v2.x/en/_index.md b/content/rancher/v2.x/en/_index.md index ac450e030e3..1cdb421ebd0 100644 --- a/content/rancher/v2.x/en/_index.md +++ b/content/rancher/v2.x/en/_index.md @@ -1,11 +1,15 @@ --- -shortTitle: Rancher 2.x +title: "Rancher 2.x" +shortTitle: "Rancher 2.x" +description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." +metaTitle: "Rancher 2.x Docs: What is New?" +metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." insertOneSix: true weight: 1 ctaBanner: intro-k8s-rancher-online-training --- -## What's New? +# What's New? Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. With the rise of Kubernetes in the marketplace, Rancher now exclusively deploys and manages multiple Kubernetes clusters running anywhere, on any provider. It can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or inherit existing Kubernetes clusters running anywhere. diff --git a/content/rancher/v2.x/en/admin-settings/_index.md b/content/rancher/v2.x/en/admin-settings/_index.md index b2589089e8f..e1dc6d52f2c 100644 --- a/content/rancher/v2.x/en/admin-settings/_index.md +++ b/content/rancher/v2.x/en/admin-settings/_index.md @@ -1,12 +1,11 @@ --- -title: Global Configuration +title: Authentication, Permissions and Global Configuration weight: 1100 aliases: - /rancher/v2.x/en/concepts/global-configuration/ - /rancher/v2.x/en/tasks/global-configuration/ - /rancher/v2.x/en/concepts/global-configuration/server-url/ - /rancher/v2.x/en/tasks/global-configuration/server-url/ - - /rancher/v2.x/en/admin-settings/server-url/ - /rancher/v2.x/en/admin-settings/log-in/ --- @@ -28,7 +27,7 @@ For more information how authentication works and how to configure each provider Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. 
-For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/). +For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)]({{}}/rancher/v2.x/en/admin-settings/rbac/). ## Pod Security Policies @@ -41,3 +40,21 @@ For more information how to create and use PSPs, see [Pod Security Policies]({{< Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. For more information, see [Provisioning Drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/). + +## Adding Kubernetes Versions into Rancher + +_Available as of v2.3.0_ + +With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. + +The information that Rancher uses to provision [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/) + +Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). + +For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata/). + +## Enabling Experimental Features + +_Available as of v2.3.0_ + +Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. 
For more information, refer to the section about [feature flags.]({{}}/rancher/v2.x/en/admin-settings/feature-flags/) diff --git a/content/rancher/v2.x/en/admin-settings/authentication/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/_index.md index 0dc19a78a1b..6a786d9a78f 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/_index.md @@ -18,27 +18,52 @@ The Rancher authentication proxy integrates with the following external authenti | Auth Service | Available as of | | ------------------------------------------------------------------------------------------------ | ---------------- | -| [Microsoft Active Directory]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ad/) | v2.0.0 | -| [GitHub]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/github/) | v2.0.0 | -| [Microsoft Azure AD]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/azure-ad/) | v2.0.3 | -| [FreeIPA]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/freeipa/) | v2.0.5 | -| [OpenLDAP]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/openldap/) | v2.0.5 | -| [Microsoft AD FS]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/) | v2.0.7 | -| [PingIdentity]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ping-federate/) | v2.0.7 | -| [Keycloak]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/keycloak/) | v2.1.0 | -| [Okta]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/okta/) | v2.2.0 | +| [Microsoft Active Directory]({{}}/rancher/v2.x/en/admin-settings/authentication/ad/) | v2.0.0 | +| [GitHub]({{}}/rancher/v2.x/en/admin-settings/authentication/github/) | v2.0.0 | +| [Microsoft Azure AD]({{}}/rancher/v2.x/en/admin-settings/authentication/azure-ad/) | v2.0.3 | +| [FreeIPA]({{}}/rancher/v2.x/en/admin-settings/authentication/freeipa/) | v2.0.5 | +| [OpenLDAP]({{}}/rancher/v2.x/en/admin-settings/authentication/openldap/) | v2.0.5 | +| [Microsoft AD FS]({{}}/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/) | v2.0.7 | +| [PingIdentity]({{}}/rancher/v2.x/en/admin-settings/authentication/ping-federate/) | v2.0.7 | +| [Keycloak]({{}}/rancher/v2.x/en/admin-settings/authentication/keycloak/) | v2.1.0 | +| [Okta]({{}}/rancher/v2.x/en/admin-settings/authentication/okta/) | v2.2.0 | +| [Google OAuth]({{}}/rancher/v2.x/en/admin-settings/authentication/google/) | v2.3.0 |
-However, Rancher also provides [local authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/local/). +However, Rancher also provides [local authentication]({{}}/rancher/v2.x/en/admin-settings/authentication/local/). In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. ## Users and Groups -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/). +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.x/en/admin-settings/rbac/). > **Note:** Local authentication does not support creating or managing groups. -For more information, see [Users and Groups]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) +For more information, see [Users and Groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) + +## Scope of Rancher Authorization + +After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: + +| Access Level | Description | +|----------------------------------------------|-------------| +| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | +| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | +| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | + +To set the Rancher access level for users in the authorization service, follow these steps: + +1. From the **Global** view, click **Security > Authentication.** + +1. 
Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. + +1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. + +1. Click **Save.** + +**Result:** The Rancher access configuration settings are applied. + +{{< saml_caveats >}} ## External Authentication Configuration and Principal Users @@ -51,22 +76,22 @@ Configuration of external authentication affects how principal users are managed 1. Sign into Rancher as the local principal and complete configuration of external authentication. - ![Sign In]({{< baseurl >}}/img/rancher/sign-in.png) + ![Sign In]({{}}/img/rancher/sign-in.png) 2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. - ![Principal ID Sharing]({{< baseurl >}}/img/rancher/principal-ID.png) + ![Principal ID Sharing]({{}}/img/rancher/principal-ID.png) 3. After you complete configuration, Rancher automatically signs out the local principal. - ![Sign Out Local Principal]({{< baseurl >}}/img/rancher/sign-out-local.png) + ![Sign Out Local Principal]({{}}/img/rancher/sign-out-local.png) 4. Then, Rancher automatically signs you back in as the external principal. - ![Sign In External Principal]({{< baseurl >}}/img/rancher/sign-in-external.png) + ![Sign In External Principal]({{}}/img/rancher/sign-in-external.png) 5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. - ![Sign In External Principal]({{< baseurl >}}/img/rancher/users-page.png) + ![Sign In External Principal]({{}}/img/rancher/users-page.png) 6. The external principal and the local principal share the same access rights. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md index 037a2fc6881..f74e1e8b0ce 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md @@ -5,19 +5,19 @@ aliases: - /rancher/v2.x/en/tasks/global-configuration/authentication/active-directory/ --- -If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. +If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/openldap) integration. 
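Because the integration runs over LDAP, it can be worth confirming from a shell that the AD server is reachable and that the service account can bind before filling in the Rancher form. Below is a minimal sketch, assuming the standard `ldapsearch` and `openssl` CLIs are available and using placeholder host names, DNs, and credentials:

```bash
# Placeholder values - substitute your own AD host, service account, and base DN.
AD_HOST=ad.example.com
BIND_DN='CN=svc-rancher,CN=Users,DC=example,DC=com'
BASE_DN='DC=example,DC=com'

# Verify that an authenticated bind and a trivial base-scope search work over port 389.
ldapsearch -x -H "ldap://${AD_HOST}:389" -D "${BIND_DN}" -W -b "${BASE_DN}" -s base "(objectClass=*)" dn

# If you plan to use LDAPS, confirm the certificate chain presented on port 636.
openssl s_client -connect "${AD_HOST}:636" -showcerts </dev/null
```

If the bind fails here, it will also fail from Rancher, so fixing credentials or firewall rules first saves a round of trial and error in the UI.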
> **Note:** -> +> > Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). ## Prerequisites You'll need to create or obtain from your AD administrator a new AD user to use as service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. -Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. +Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. Note however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. @@ -74,11 +74,12 @@ The table below details the parameters for the user schema section configuration | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for user objects in your domain. | +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Username Attribute | The user attribute whose value is suitable as a display name. | | Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | | User Member Attribute | The attribute containing the groups that a user is a member of. | | Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | +| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | | User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | | Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. 
You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | @@ -92,11 +93,12 @@ The table below details the parameters for the group schema configuration. | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for group objects in your domain. | +| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Name Attribute | The group attribute whose value is suitable for a display name. | | Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | | Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | | Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | +| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | | Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing a the user's memberships. See `User Member Attribute`. | | Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organisation makes use of these nested memberships (ie. you have groups that contain other groups as members). | @@ -108,7 +110,7 @@ Once you have completed the configuration, proceed by testing the connection to > **Note:** > -> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned admin privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. +> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. 1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. 2. Click **Authenticate with Active Directory** to finalise the setup. @@ -124,7 +126,7 @@ Once you have completed the configuration, proceed by testing the connection to ## Annex: Identify Search Base and Schema using ldapsearch -In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hirarchy and schema of your AD server. +In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. 
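The same tool can be used to dry-run a candidate `Search Filter` before entering it in the user or group schema configuration above; if the filter is not valid AD search syntax, the user and group lists in Rancher will simply come back empty. A rough sketch with placeholder bind credentials and DNs:

```bash
# Placeholder bind account, host, and DNs - substitute your own values.
ldapsearch -x -D "acme\\svc-rancher" -W -H ldap://ad.example.com:389 \
  -b "CN=Users,DC=acme,DC=com" \
  "(&(objectClass=person)(|(memberOf=CN=group1,CN=Users,DC=acme,DC=com)(memberOf=CN=group2,CN=Users,DC=acme,DC=com)))" \
  sAMAccountName userPrincipalName
# An empty result usually means the filter or the search base is wrong; the same
# filter would then also match nothing when Rancher applies it.
```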
@@ -146,7 +148,7 @@ $ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountNam=jdoe`), returning the attributes for said user: -![LDAP User]({{< baseurl >}}/img/rancher/ldapsearch-user.png) +{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. @@ -163,7 +165,7 @@ The output of the above `ldapsearch` query also allows to determine the correct > **Note:** > -> If the AD users in our organisation were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. +> If the AD users in our organisation were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI either by entering their username or full name. @@ -179,7 +181,7 @@ $ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ This command will inform us on the attributes used for group objects: -![LDAP Group]({{< baseurl >}}/img/rancher/ldapsearch-group.png) +{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} Again, this allows us to determine the correct values to enter in the group schema configuration: diff --git a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md index 34f2f545d68..b4879220c29 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md @@ -9,6 +9,8 @@ _Available as of v2.0.3_ If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. +>**Note:** Azure AD integration only supports Service Provider initiated logins. + >**Prerequisite:** Have an instance of Azure AD configured. >**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). diff --git a/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md new file mode 100644 index 00000000000..5266e219d7e --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md @@ -0,0 +1,106 @@ +--- +title: Configuring Google OAuth +--- +_Available as of v2.3.0_ + +If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. + +Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. + +Within Rancher, only administrators or users with the **Manage Authentication** [global role]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) can configure authentication. 
+ +# Prerequisites +- You must have a [G Suite admin account](https://admin.google.com) configured. +- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. +- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) + +After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: +![Enable Admin APIs]({{}}/img/rancher/Google-Enable-APIs-Screen.png) + +# Setting up G Suite for OAuth with Rancher +Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: + +1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) +1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) +1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) +1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) + +### 1. Adding Rancher as an Authorized Domain +1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. +1. Select your project and click **OAuth consent screen.** +![OAuth Consent Screen]({{}}/img/rancher/Google-OAuth-consent-screen-tab.png) +1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) +1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. + +**Result:** Rancher has been added as an authorized domain for the Admin SDK API. + +### 2. Creating OAuth2 Credentials for the Rancher Server +1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) +![Credentials]({{}}/img/rancher/Google-Credentials-tab.png) +1. On the **Create Credentials** dropdown, select **OAuth client ID.** +1. Click **Web application.** +1. Provide a name. +1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. + - Under **Authorized JavaScript origins,** enter your Rancher server URL. + - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. +1. Click on **Create.** +1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. 
+ +**Result:** Your OAuth credentials have been successfully created. + +### 3. Creating Service Account Credentials +Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. + +Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. + +As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. + +This section describes how to: + +- Create a service account +- Create a key for the service account and download the credentials as JSON + +1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. +1. Click on **Create Service Account.** +1. Enter a name and click **Create.** +![Service account creation Step 1]({{}}/img/rancher/Google-svc-acc-step1.png) +1. Don't provide any roles on the **Service account permissions** page and click **Continue** +![Service account creation Step 2]({{}}/img/rancher/Google-svc-acc-step2.png) +1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. +![Service account creation Step 3]({{}}/img/rancher/Google-svc-acc-step3-key-creation.png) + +**Result:** Your service account is created. + +### 4. Register the Service Account Key as an OAuth Client + +You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. + +Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: + +1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** + + ![Service account Unique ID]({{}}/img/rancher/Google-Select-UniqueID-column.png) +1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) +1. Add the Unique ID obtained in the previous step in the **Client Name** field. +1. In the **One or More API Scopes** field, add the following scopes: + ``` + openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly + ``` +1. Click **Authorize.** + +**Result:** The service account is registered as an OAuth client in your G Suite account. + +# Configuring Google OAuth in Rancher +1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. +1. From the **Global** view, click **Security > Authentication** from the main menu. +1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. + 1. Admin Email: Provide the email of an administrator account from your GSuite setup. 
In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. + 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. + 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. + - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) + - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. + - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. +1. Click **Authenticate with Google**. +1. Click **Save**. + +**Result:** Google authentication is successfully configured. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md index 6eb4373db2d..e7350e6c96d 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/keycloak/_index.md @@ -1,5 +1,6 @@ --- title: Configuring Keycloak (SAML) +description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins weight: 1200 --- _Available as of v2.1.0_ @@ -11,7 +12,7 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati - You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. - In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. - Setting | Value + Setting | Value ------------|------------ `Sign Documents` | `ON` 1 `Sign Assertions` | `ON` 1 @@ -22,7 +23,8 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` >1: Optionally, you can enable either one or both of these settings. -- Export a `metadata.xml` file from your Keycloak client. From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. +- Export a `metadata.xml` file from your Keycloak client: + From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. ## Configuring Keycloak in Rancher @@ -46,7 +48,7 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati | IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | >**Tip:** You can generate a key/certificate pair using an openssl command. 
For example: - > + > > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert @@ -62,7 +64,7 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the confiuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{< baseurl >}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{< baseurl >}}/rancher/v2.x/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. ### You are not redirected to Keycloak @@ -88,3 +90,25 @@ You are correctly redirected to your IdP login page and you are able to enter yo * Check your Keycloak log. * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. + +### Keycloak 6.0.0+: IDPSSODescriptor missing from options + +Keycloak versions 6.0.0 and up no longer provide the IDP metadata under the `Installation` tab. +You can still get the XML from the following url: + +`https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}/protocol/saml/descriptor` + +The XML obtained from this URL contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: + + * Copy all the tags from `EntitiesDescriptor` to the `EntityDescriptor`. + * Remove the `` tag from the beginning. + * Remove the `` from the end of the xml. + +You are left with something similar as the example below: + +``` + + .... + + +``` diff --git a/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md index a5bde3dec91..3044cc298fd 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/local/_index.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.x/en/tasks/global-configuration/authentication/local-authentication/ --- -Local authentication is the default until you configure an external authentication provider. Local authentication is where Rancher stores the user information, i.e. names and passwords, of who can log in to Ranchehr. By default, the `admin` user that logs in to Rancher for the first time is a local user. +Local authentication is the default until you configure an external authentication provider. Local authentication is where Rancher stores the user information, i.e. names and passwords, of who can log in to Rancher. By default, the `admin` user that logs in to Rancher for the first time is a local user. 
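Circling back to the Keycloak 6.0.0+ note above: because the `Installation` tab no longer offers the IDPSSODescriptor export, one workable approach is to download the realm descriptor from the URL shown earlier and re-root it from `EntitiesDescriptor` to `EntityDescriptor` before uploading it to Rancher. The sketch below is an assumption-heavy shortcut (placeholder Keycloak URL and realm, and a blunt text substitution instead of a proper XML edit), so verify the result and carry over any namespace declarations that only existed on the removed wrapper:

```bash
# Placeholder Keycloak URL and realm name.
KEYCLOAK_URL=https://keycloak.example.com
REALM=myrealm

# Fetch the IdP SAML descriptor; its root element is <EntitiesDescriptor>.
curl -fsSL "${KEYCLOAK_URL}/auth/realms/${REALM}/protocol/saml/descriptor" -o descriptor.xml

# Strip the outer <EntitiesDescriptor ...> wrapper so that <EntityDescriptor>
# becomes the root element, which is what Rancher expects. This is a crude
# line-based edit; inspect the output before handing it to Rancher.
sed -e 's/<EntitiesDescriptor[^>]*>//' -e 's|</EntitiesDescriptor>||' descriptor.xml > rancher-idp-metadata.xml
```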
## Adding Local Users diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md index a05c9709e29..822a991e3e9 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md @@ -9,57 +9,57 @@ Before configuring Rancher to support AD FS users, you must add Rancher as a [re 1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - + {{< img "/img/rancher/adfs/adfs-overview.png" "">}} 1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - + {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} 1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - + {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} 1. Select **AD FS profile** as the configuration profile for your relying party trust. - + {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} 1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. - + {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} 1. Select **Enable support for the SAML 2.0 WebSSO protocol** and enter `https:///v1-saml/adfs/saml/acs` for the service URL. - + {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} 1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - + {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} 1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. - + {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}} 1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. - + {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}} 1. After reviewing your settings, select **Next** to add the relying party trust. - + {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}} 1. Select **Open the Edit Claim Rules...** and click **Close**. - + {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}} 1. On the **Issuance Transform Rules** tab, click **Add Rule...**. - + {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}} 1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. - + {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}} 1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: @@ -70,7 +70,7 @@ Before configuring Rancher to support AD FS users, you must add Rancher as a [re | Token-Groups - Qualified by Long Domain Name | Group | | SAM-Account-Name | Name |
- + {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} 1. Download the `federationmetadata.xml` from your AD server at: ``` diff --git a/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md index b0af27f7f12..517cd8f6975 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md @@ -7,6 +7,8 @@ _Available as of v2.2.0_ If your organization uses Okta Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. +>**Note:** Okta integration only supports Service Provider initiated logins. + ## Prerequisites In Okta, create a SAML Application with the settings below. See the [Okta documentation](https://developer.okta.com/standards/SAML/setting_up_a_saml_application_in_okta) for help. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md index 2777c006cac..bce05911aac 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md @@ -22,7 +22,7 @@ If your organization uses LDAP for user authentication, you can configure Ranche ## Prerequisites -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an admin account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). > **Using TLS?** > @@ -75,7 +75,7 @@ The table below details the parameters for the user schema configuration. | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for user objects in your domain. | +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Username Attribute | The user attribute whose value is suitable as a display name. | | Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | | User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | @@ -93,7 +93,7 @@ The table below details the parameters for the group schema configuration. | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for group entries in your domain. | +| Object Class | The name of the object class used for group entries in your domain. 
If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Name Attribute | The group attribute whose value is suitable for a display name. | | Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | | Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | @@ -109,7 +109,7 @@ Once you have completed the configuration, proceed by testing the connection to > **Note:** > -> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned admin privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. +> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. 1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. 2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md index cc6a3b75791..8cb2508c681 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/ping-federate/_index.md @@ -9,9 +9,9 @@ If your organization uses Ping Identity Provider (IdP) for user authentication, >**Prerequisites:** > >- You must have a [Ping IdP Server](https://www.pingidentity.com/) configured. ->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/ping/saml/metadata` -Assertion Consure Service (ACS) URL: `https:///v1-saml/ping/saml/acs` +>- Following are the Rancher Service Provider URLs needed for configuration: +Metadata URL: `https:///v1-saml/ping/saml/metadata` +Assertion Consumer Service (ACS) URL: `https:///v1-saml/ping/saml/acs` >- Export a `metadata.xml` file from your IdP Server. For more information, see the [PingIdentity documentation](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). 1. From the **Global** view, select **Security > Authentication** from the main menu. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md index 6cc49ad058f..722452e5f63 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/user-groups/_index.md @@ -29,7 +29,7 @@ Rancher will periodically refresh the user information even before a user logs i - **`auth-user-info-max-age-seconds`** - This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This settting defaults to `3600` seconds, i.e. 1 hour. + This setting controls how old a user's information can be before Rancher refreshes it. 
If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. - **`auth-user-info-resync-cron`** @@ -49,3 +49,16 @@ If you are not sure the last time Rancher performed an automatic refresh of user **Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. >**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. + + +## Session Length + +_Available as of v2.3.0_ + +The default length (TTL) of each user session is adjustable. The default session length is 16 hours. + +1. From the **Global** view, click on **Settings**. +1. In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** +1. Enter the amount of time in minutes a session length should last and click **Save.** + +**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/content/rancher/v2.x/en/admin-settings/config-private-registry/_index.md b/content/rancher/v2.x/en/admin-settings/config-private-registry/_index.md new file mode 100644 index 00000000000..40773122cc3 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/config-private-registry/_index.md @@ -0,0 +1,44 @@ +--- +title: Configuring a Global Default Private Registry +weight: 400 +aliases: +--- + +You might want to use a private Docker registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the Docker images that are used in your clusters. + +There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. + +For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation]({{}}/rancher/v2.x/en/installation/air-gap-single-node) or [air gapped Kubernetes installation]({{}}/rancher/v2.x/en/installation/air-gap-high-availability) instructions. + +If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#provisioning-clusters-with-private-registries-that-require-credentials) every time you create a new cluster. + +# Setting a Private Registry with No Credentials as the Default Registry + +1. 
Log into Rancher and configure the default administrator password. + +1. Go into the **Settings** view. + + {{< img "/img/rancher/airgap/settings.png" "Settings" >}} + +1. Look for the setting called `system-default-registry` and choose **Edit**. + + {{< img "/img/rancher/airgap/edit-system-default-registry.png" "Edit" >}} + +1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. + + {{< img "/img/rancher/airgap/enter-system-default-registry.png" "Save" >}} + +**Result:** Rancher will use your private registry to pull system images. + +# Setting a Private Registry with Credentials when Deploying a Cluster + +You can follow these steps to configure a private registry when you provision a cluster with Rancher: + +1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** +1. In the Enable Private Registries section, click **Enabled.** +1. Enter the registry URL and credentials. +1. Click **Save.** + +**Result:** The new cluster will be able to pull images from the private registry. diff --git a/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md b/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md new file mode 100644 index 00000000000..4f16e1aa458 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md @@ -0,0 +1,71 @@ +--- +title: Upgrading Kubernetes without Upgrading Rancher +weight: 1120 +--- + +_Available as of v2.3.0_ + +The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. + +> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. + +Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. + +This table below describes the CRDs that are affected by the periodic data sync. + +> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. + +| Resource | Description | Rancher API URL | +|----------|-------------|-----------------| +| System Images | List of system images used to deploy Kubernetes through RKE. 
| `/v3/rkek8ssystemimages` | +| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | +| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | + +Administrators might configure the RKE metadata settings to do the following: + +- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher +- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub +- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher + +### Refresh Kubernetes Metadata + +The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) + +To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. + +### Configuring the Metadata Synchronization + +> Only administrators can change these settings. + +The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. + +To edit the metadata config in Rancher, + +1. Go to the **Global** view and click the **Settings** tab. +1. Go to the **rke-metadata-config** section. Click the **Ellipsis (...)** and click **Edit.** +1. You can optionally fill in the following parameters: + + - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. + - `url`: This is the HTTP path that Rancher fetches data from. + - `branch`: This refers to the Git branch name if the URL is a Git URL. + +If you don't have an air gap setup, you don't need to specify the URL or Git branch where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata.git) + +However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL and Git branch in the `rke-metadata-config` settings to point to the new location of the repository. + +### Air Gap Setups + +Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) + +If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. 
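For example, an administrator could do this by editing the `rke-metadata-config` setting described above, either in the UI or against the `v3/settings/rke-metadata-config` API endpoint. A hedged sketch using `curl` follows; the server URL, the API token, and the exact layout of the value are assumptions, so read the current value first and keep any fields you are not changing:

```bash
# Placeholder Rancher server URL and API bearer token.
RANCHER=https://rancher.example.com
TOKEN=token-xxxxx:yyyyyyyy

# Read the current value first so you can preserve the url/branch fields as-is.
curl -sk -H "Authorization: Bearer ${TOKEN}" "${RANCHER}/v3/settings/rke-metadata-config"

# Write the value back with refresh-interval-minutes set to "0" to disable the
# periodic sync. The value is a JSON string, hence the escaped inner quotes;
# the url and branch shown here are only illustrative.
curl -sk -X PUT \
  -H "Authorization: Bearer ${TOKEN}" -H "Content-Type: application/json" \
  "${RANCHER}/v3/settings/rke-metadata-config" \
  -d '{"value":"{\"refresh-interval-minutes\":\"0\",\"url\":\"https://github.com/rancher/kontainer-driver-metadata.git\",\"branch\":\"master\"}"}'
```

The same two fields are what you would repoint at a local mirror, as described next.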
Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. + +To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings by updating the `url` and `branch` to point to the mirror. + +After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. + +1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. +1. Download the OS specific image lists for Linux or Windows. +1. Download `rancher-images.txt`. +1. Prepare the private registry using the same steps during the [air gap install]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. + +**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md index 51b190231ef..2ff9bd75b55 100644 --- a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md +++ b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md @@ -17,6 +17,8 @@ _Pod Security Policies_ (or PSPs) are objects that control security-sensitive as - You can override the default PSP by assigning a different PSP directly to the project. - Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked if it complies with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. +>**Note:** You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) + Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). >**Best Practice:** Set pod security at the cluster level. diff --git a/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md index 9872b85b113..591d1e2365d 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/_index.md @@ -11,7 +11,7 @@ The projects and clusters accessible to non-administrative users is determined b When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. -> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the user membership. 
+> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. ### Cluster Roles @@ -27,31 +27,50 @@ _Cluster roles_ are roles that you can assign to users, granting them access to #### Custom Cluster Roles -Rancher lets you assign _custom cluster roles_ to a user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a user within a cluster. See the table below for a list of built-in custom cluster roles. +Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. #### Cluster Role Reference -The following table lists each built-in custom cluster role available in Rancher and whether it is also granted by the `Owner` or `Member` role. +The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. | Built-in Cluster Role | Owner | Member | | ---------------------------------- | ------------- | --------------------------------- | +| Create Projects | ✓ | ✓ | +| Manage Cluster Backups             | ✓ | | +| Manage Cluster Catalogs | ✓ | | | Manage Cluster Members | ✓ | | -| Manage Cluster Catalogs | ✓ | | Manage Nodes | ✓ | | -| Manage Snapshots | ✓ || | Manage Storage | ✓ | | -| View All Projects | ✓ | | -| Create Project | ✓ | ✓ | -| View Cluster Members | ✓ | ✓ | +| View All Projects | ✓ | | | View Cluster Catalogs | ✓ | ✓ | +| View Cluster Members | ✓ | ✓ | | View Nodes | ✓ | ✓ | -| View Snapshots | ✓ | ✓ | -> **Notes:** -> ->- Each cluster role listed above, including `Owner` and `Member`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kuberenetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. ->- The `Manage Cluster Members` role allows the user to manage any members of the cluster **and** grant them any cluster scoped role regardless of their access to the cluster resources. Be cautious when assigning this role out individually. +For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. + +> **Note:** +>When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. 
These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +### Giving a Custom Cluster Role to a Cluster Member + +After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/#adding-a-custom-role) cluster owners and admins can then assign those roles to cluster members. + +To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. + +To assign the role to a new cluster member, + +1. Go to the **Cluster** view, then go to the **Members** tab. +1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create.** + +**Result:** The member has the assigned role. + +To assign any custom role to an existing cluster member, + +1. Go to the member you want to give the role to. Click the **Ellipsis (...) > View in API.** +1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** + +**Result:** The member has the assigned role. ### Project Roles @@ -70,13 +89,13 @@ _Project roles_ are roles that can be used to grant users access to a project. T These users can view everything in the project but cannot create, update, or delete anything. >**Caveat:** - > + > >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. #### Custom Project Roles -Rancher lets you assign _custom project roles_ to a user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a user within a project. See the table below for a list of built-in custom project roles. +Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. #### Project Role Reference @@ -107,8 +126,8 @@ The following table lists each built-in custom project role available in Rancher > **Notes:** > >- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kuberenetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. 
->- The `Manage Project Members` role allows the user to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. +>- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. +>- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. ### Defining Custom Roles As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. @@ -117,7 +136,7 @@ When defining a custom role, you can grant access to specific resources or speci ### Default Cluster and Project Roles -By default, when a user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. +By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. There are two methods for changing default cluster/project roles: @@ -132,7 +151,7 @@ There are two methods for changing default cluster/project roles: >- Although you can [lock]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. >- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. -### Configuring Default Roles +### Configuring Default Roles for Cluster and Project Creators You can change the cluster or project role(s) that are automatically assigned to the creating user. @@ -142,12 +161,12 @@ You can change the cluster or project role(s) that are automatically assigned to 1. Enable the role as default. {{% accordion id="cluster" label="For Clusters" %}} -1. From **Clustor Creator Default**, choose **Yes: Default role for new cluster creation**. +1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. 1. Click **Save**. {{% /accordion %}} {{% accordion id="project" label="For Projects" %}} 1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. -1. Click **Save**. +1. Click **Save**. {{% /accordion %}} 1. If you want to remove a default role, edit the permission and select **No** from the default roles option. 
@@ -156,7 +175,7 @@ You can change the cluster or project role(s) that are automatically assigned to ### Cluster Membership Revocation Behavior -When you revoke the cluster membership for a user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the user can still: +When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: - Access the projects they hold membership in. - Exercise any [individual project roles](#project-role-reference) they are assigned. diff --git a/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md index 68f4f3c9cab..895330c46ea 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/_index.md @@ -2,70 +2,166 @@ title: Custom Roles weight: 1128 aliases: - - /rancher/v2.x/en/tasks/global-configuration/roles/ + - /rancher/v2.x/en/tasks/global-configuration/roles/ --- Within Rancher, _roles_ determine what actions a user can make within a cluster or project. Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. ->**Prerequisites:** -> ->To complete the tasks on this page, the following permissions are required: -> ->- [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). ->- [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned. +This section covers the following topics: -## Adding A Custom Role +- [Prerequisites](#prerequisites) +- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) +- [Creating a custom global role that copies rules from an existing role](#creating-a-custom-global-role-that-copies-rules-from-an-existing-role) +- [Creating a custom global role that does not copy rules from another role](#creating-a-custom-global-role-that-does-not-copy-rules-from-another-role) +- [Deleting a custom global role](#deleting-a-custom-global-role) +- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) + +## Prerequisites + +To complete the tasks on this page, one of the following permissions is required: + + - [Administrator Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). + - [Custom Global Permissions]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/#global-permissions-reference) role assigned.
+ +## Creating A Custom Role for a Cluster or Project While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. -1. From the **Global** view, select **Security > Roles** from the main menu. +The steps to add custom roles differ depending on the version of Rancher. -1. **v2.0.7 and later only:** Select a tab to determine the scope of the roles you're adding. The tabs are: +{{% tabs %}} +{{% tab "Rancher v2.0.7+" %}} - - **Cluster** +1. From the **Global** view, select **Security > Roles** from the main menu. - The role is valid for assignment when adding/managing members to _only_ clusters. +1. Select a tab to determine the scope of the roles you're adding. The tabs are: - - **Project** + - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. + - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. - The role is valid for assignment when adding/managing members to _only_ projects. +1. Click **Add Cluster/Project Role.** - >**Note:** You cannot edit the Global tab. +1. **Name** the role. -1. Click **Add Cluster/Project Role**. +1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. -1. **Name** the role. + > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. -1. Choose whether to set the role to a status of [locked]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). +1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - Locked roles cannot be assigned to users. + > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. -1. **v2.0.7 and later only:** Choose a **Cluster/Project Creator Default** option setting. Use this option to set if the role is assigned to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. + You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - >**Note:** Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. +1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. -1. **v2.0.6 and earlier only:** Assign the role a **Context**. Context determines the scope of role assigned to the user. The contexts are: +1. Click **Create**. - - **All** +{{% /tab %}} +{{% tab "Rancher prior to v2.0.7" %}} - The user can use their assigned role regardless of context. This role is valid for assignment when adding/managing members to clusters or projects. +1. From the **Global** view, select **Security > Roles** from the main menu. - - **Cluster** +1. Click **Add Role**. 
- This role is valid for assignment when adding/managing members to _only_ clusters. +1. **Name** the role. - - **Project** +1. Choose whether to set the role to a status of [locked]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). - This role is valid for assignment when adding/managing members to _only_ projects. + > **Note:** Locked roles cannot be assigned to users. -6. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. +1. In the **Context** dropdown menu, choose the scope of the role assigned to the user. The contexts are: - >**Note:** When viewing the resources associated with default roles created by Rancher, if there are multiple Kuberenetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + - **All:** The user can use their assigned role regardless of context. This role is valid for assignment when adding/managing members to clusters or projects. - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. + - **Cluster:** This role is valid for assignment when adding/managing members to _only_ clusters. -7. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. + - **Project:** This role is valid for assignment when adding/managing members to _only_ projects. -8. Click **Create**. +1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. + + > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + + You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. + +1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. + +1. Click **Create**. + +{{% /tab %}} +{{% /tabs %}} + +## Creating a Custom Global Role that Copies Rules from an Existing Role + +_Available as of v2.4.0-alpha1_ + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. + +The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. + +To create a custom global role based on an existing role, + +1. Go to the **Global** view and click **Security > Roles.** +1. On the **Global** tab, go to the role that the custom global role will be based on. Click **Ellipsis (…) > Clone.** +1. Enter a name for the role. +1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** +1. 
In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. +1. Click **Save.** + +## Creating a Custom Global Role that Does Not Copy Rules from Another Role + +_Available as of v2.4.0-alpha1_ + +Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: + +1. Go to the **Global** view and click **Security > Roles.** +1. On the **Global** tab, click **Add Global Role.** +1. Enter a name for the role. +1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** +1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. +1. Click **Save.** + +## Deleting a Custom Global Role + +_Available as of v2.4.0-alpha1_ + +When deleting a custom global role, all global role bindings with this custom role are deleted. + +If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. + +Custom global roles can be deleted, but built-in roles cannot be deleted. + +To delete a custom global role, + +1. Go to the **Global** view and click **Security > Roles.** +2. On the **Global** tab, go to the custom global role that should be deleted and click **Ellipsis (…) > Delete.** +3. Click **Delete.** + +## Assigning a Custom Global Role to a Group + +_Available as of v2.4.0-alpha1_ + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. + +When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. + +> **Prerequisites:** You can only assign a global role to a group if: +> +> * You have set up an [external authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-vs-local-authentication) +> * The external authentication provider supports [user groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) +> * You have already set up at least one user group with the authentication provider + +To assign a custom global role to a group, follow these steps: + +1. From the **Global** view, go to **Security > Groups.** +1. Click **Assign Global Role.** +1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. +1. In the **Custom** section, choose any custom global role that will be assigned to the group. +1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. +1. Click **Create.** + +**Result:** The custom global role will take effect when the users in the group log into Rancher. 
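
As a point of reference, a custom global role is ultimately stored as a set of Kubernetes-style rules. The sketch below is an illustration only: it assumes a `GlobalRole`-style object with `displayName`, `newUserDefault`, and `rules` fields, and the exact field names may differ between Rancher versions, so verify against your own setup (for example, through the API at `/v3/globalRoles`):

```yaml
# Illustrative sketch only; field names are assumptions and may differ by Rancher version.
apiVersion: management.cattle.io/v3
kind: GlobalRole
metadata:
  name: view-catalogs-only        # hypothetical role name
displayName: View Catalogs Only
newUserDefault: false             # maps to the "Yes/No: Default role for new users" option
rules:
  # Each rule corresponds to a line item in the "Grant Resources" section.
  - apiGroups:
      - management.cattle.io
    resources:
      - catalogs
    verbs:
      - get
      - list
      - watch
```

Each entry under `rules` corresponds to a line in the **Grant Resources** table that the Rancher UI shows for the role.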
\ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md index 51da015a34e..4f754a97d37 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/global-permissions/_index.md @@ -3,76 +3,170 @@ title: Global Permissions weight: 1126 --- -Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are two default global permissions: `Administrator` and `Standard User`. - -- **Administrator:** - - These users have full control over the entire Rancher system and all clusters within it. - -- **Standard User:** - - These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. - ->**Note:** You cannot create, update, or delete Global Permissions. - -### Global Permission Assignment - -Assignment of global permissions to a user depends on their authentication source: external or local. - -- **External Authentication** - - When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the `Standard User` global permission. - -- **Local Authentication** - - When you create a new local user, you assign them a global permission as you complete the **Add User** form. - -### Custom Global Permissions - -Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a custom set of permissions. - _Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. -Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. See the [table below](#global-permissions-reference) for a list of individual permissions available. +Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are two default global permissions: `Administrator` and `Standard User`. -### Global Permissions Reference +- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. -The following table lists each custom global permission available and whether it is assigned to the default global permissions, `Administrator` and `Standard User`. +- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. + +You cannot update or delete the built-in Global Permissions. 
+ +This section covers the following topics: + +- [Global permission assignment](#global-permission-assignment) + - [Global permissions for new local users](#global-permissions-for-new-local-users) + - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) +- [Custom global permissions](#custom-global-permissions) + - [Custom global permissions reference](#custom-global-permissions-reference) + - [Configuring default global permissions for new users](#configuring-default-global-permissions) + - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) + - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + - [Refreshing group memberships](#refreshing-group-memberships) + +# Global Permission Assignment + +Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. + +### Global Permissions for New Local Users + +When you create a new local user, you assign them a global permission as you complete the **Add User** form. + +To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) + +### Global Permissions for Users with External Authentication + +When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. + +To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) + +Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) + +As of Rancher v2.4.0-alpha1, you can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. + +# Custom Global Permissions + +Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. + +When a user from an [external authentication source]({{}}/rancher/v2.x/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. + +However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. + +The default roles, Administrator and Standard User, each come with multiple global permissions built into them. 
The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. + +Administrators can enforce custom global permissions in multiple ways: + +- [Changing the default permissions for new users](#configuring-default-global-permissions) +- [Editing the permissions of an existing user](#configuring-global-permissions-for-individual-users) +- [Assigning a custom global permission to a group](#assigning-a-custom-global-permission-to-a-group) + +### Custom Global Permissions Reference + +The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator` and `Standard User`. | Custom Global Permission | Administrator | Standard User | | ---------------------------------- | ------------- | ------------- | +| Create Clusters | ✓ | ✓ | +| Create RKE Templates | ✓ | ✓ | | Manage Authentication | ✓ | | | Manage Catalogs | ✓ | | | Manage Cluster Drivers | ✓ | | | Manage Node Drivers | ✓ | | | Manage PodSecurityPolicy Templates | ✓ | | | Manage Roles | ✓ | | +| Manage Settings | ✓ | | | Manage Users | ✓ | | -| Create Clusters | ✓ | ✓ | -| Use Catalog Templates | ✓ | ✓ | -| Login Access | ✓ | ✓ | +| Use Catalog Templates | ✓ | ✓ | +| User Base\* (Basic log-in access) | ✓ | ✓ | -> **Notes:** +> \*This role has two names: > ->- Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kuberenetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. +> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. +> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. -When a user from an [external authentication source]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, new users are assigned the [user](#user) permissions. However, in some organizations, these permissions may extend too much access. In this use case, you can change the default permissions to something more restrictive, such as a set of individual permissions. +For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. -You can assign one or more default permissions. For example, the `user` permission assigns new users a [set of individual global permissions](#global-permissions-reference). 
If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. - ->**Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. +> **Notes:** +> +> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. +> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. ### Configuring Default Global Permissions -You can change the default global permissions that are assigned to external users upon their first log in. +If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. + +> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. + +To change the default global permissions that are assigned to external users upon their first log in, follow these steps: 1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. -1. Find the permissions set that you want to use as default. Then edit the permission by selecting **Ellipsis > Edit**. +1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **Ellipsis > Edit**. -1. Select **Yes: Default role for new users** and then click **Save**. +1. If you want to add the permission as a default, select **Yes: Default role for new users** and then click **Save**. 1. If you want to remove a default permission, edit the permission and select **No** from **New User Default**. **Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. + +### Configuring Global Permissions for Existing Individual Users + +To configure permissions for a user, + +1. Go to the **Users** tab. + +1. On this page, go to the user whose access level you want to change and click **Ellipsis (...) > Edit.** + +1. In the **Global Permissions** section, click **Custom.** + +1. Check the boxes for each subset of permissions you want the user to have access to. + +1. Click **Save.** + +> **Result:** The user's global permissions have been updated.
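
For context, the per-user permissions edited above are recorded as bindings between the user and global roles. The following is only a rough sketch of such a binding, with hypothetical names, and assumes the binding object exposes `globalRoleName` and `userName` fields; confirm the exact shape in your own Rancher version before relying on it:

```yaml
# Rough sketch of a user-to-global-role binding; all names and fields are assumptions.
apiVersion: management.cattle.io/v3
kind: GlobalRoleBinding
metadata:
  name: grb-example               # hypothetical binding name
globalRoleName: manage-catalogs   # hypothetical custom global role
userName: u-abc123                # hypothetical Rancher user ID
```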
+ +### Configuring Global Permissions for Groups + +_Available as of v2.4.0-alpha1_ + +If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. + +After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. + +For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) + +For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. + +> **Prerequisites:** You can only assign a global role to a group if: +> +> * You have set up an [external authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/#external-vs-local-authentication) +> * The external authentication provider supports [user groups]({{}}/rancher/v2.x/en/admin-settings/authentication/user-groups/) +> * You have already set up at least one user group with the authentication provider + +To assign a custom global role to a group, follow these steps: + +1. From the **Global** view, go to **Security > Groups.** +1. Click **Assign Global Role.** +1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. +1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. +1. Click **Create.** + +**Result:** The custom global role will take effect when the users in the group log into Rancher. + +### Refreshing Group Memberships + +When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. + +To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. + +An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. + +To refresh group memberships, + +1. From the **Global** view, click **Security > Users.** +1. Click **Refresh Group Memberships.** + +**Result:** Any changes to the group members' permissions will take effect.
\ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md index d3e61d8b2ea..91ea1123625 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md @@ -27,7 +27,7 @@ If you want to prevent a role from being assigned to users, you can set it to a You can lock roles in two contexts: -- When you're [adding a custom role](({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). +- When you're [adding a custom role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - When you editing an existing role (see below). 1. From the **Global** view, select **Security** > **Roles**. diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/_index.md new file mode 100644 index 00000000000..3c0d8cbcdc5 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/_index.md @@ -0,0 +1,127 @@ +--- +title: RKE Templates +weight: 7010 +--- + +_Available as of Rancher v2.3.0_ + +RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. + +RKE is the [Rancher Kubernetes Engine,]({{}}/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. + +With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. + +RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. + +Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. + +If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. + +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. + +The core features of RKE templates allow DevOps and security teams to: + +- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices +- Prevent less technical users from making uninformed choices when provisioning clusters +- Share different templates with different sets of users and groups +- Delegate ownership of templates to users who are trusted to make changes to them +- Control which users can create templates +- Require users to create clusters from a template + +# Configurable Settings + +RKE templates can be created in the Rancher UI or defined in YAML format. 
They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: + +- Cloud provider options +- Pod security options +- Network providers +- Ingress controllers +- Network security configuration +- Network plugins +- Private registry URL and credentials +- Add-ons +- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services + +The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. + +# Scope of RKE Templates + +RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. + +RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware]({{}}/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware). + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. + +As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template), and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. + + +# Example Scenarios +When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. + +These [example scenarios]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios) describe how an organization could use templates to standardize cluster creation. + +Some of the example scenarios include the following: + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. 
If the best practices change, [a template can be updated to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) of the template. +- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/#allowing-other-users-to-control-and-share-a-template) + +# Template Management + +When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. + +Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. + +RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. Then a cluster that was created with the older version of the template can be upgraded to the new template revision. + +In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. + +For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. + +The documents in this section explain the details of RKE template management: + +- [Getting permission to create templates]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/) +- [Creating and revising templates]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/) +- [Enforcing template settings]({{}}/rancher/v2.x/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-a-cluster-template) +- [Overriding template settings]({{}}/rancher/v2.x/en/admin-settings/rke-templates/overrides/) +- [Sharing templates with cluster creators]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users) +- [Sharing ownership of a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) + +An [example YAML configuration file for a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-yaml) is provided for reference. 
+ +# Applying Templates + +You can [create a cluster from a template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-a-cluster-template) that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) + +If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. + +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +# Standardizing Hardware + +RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools]({{}}/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware). + +# YAML Customization + +If you define an RKE template as a YAML file, you can modify this [example RKE template YAML]({{}}/rancher/v2.x/en/admin-settings/rke-templates/example-yaml). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. + +The RKE documentation also has [annotated]({{}}/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. + +For guidance on available options, refer to the RKE documentation on [cluster configuration.]({{}}/rke/latest/en/config-options/) + +### Add-ons + +The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file]({{}}/rke/latest/en/config-options/add-ons/). + +The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. + +Some things you could do with add-ons include: + +- Install applications on the Kubernetes cluster after it starts +- Install plugins on nodes that are deployed with a Kubernetes daemonset +- Automatically set up namespaces, service accounts, or role binding + +The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. 
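
To make the nesting concrete, here is a minimal sketch of what the add-on portion of an RKE template might look like when edited as YAML. The specific values (network plugin, namespace, manifest URL) are placeholders, not recommendations:

```yaml
# Minimal sketch: RKE options in a template are nested under rancher_kubernetes_engine_config.
rancher_kubernetes_engine_config:
  network:
    plugin: canal                 # example network plugin choice
  addons: |-
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: example-namespace     # hypothetical namespace created after the cluster starts
  addons_include:
    - https://example.com/manifests/example-addon.yaml   # hypothetical manifest to pull down
```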
For more information on custom add-ons, refer to the [user-defined add-ons documentation.]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/_index.md new file mode 100644 index 00000000000..1a3010aefe0 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/_index.md @@ -0,0 +1,63 @@ +--- +title: Applying Templates +weight: 50 +--- + +You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) + +RKE templates can be applied to new clusters. + +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +This section covers the following topics: + +- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) +- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) +- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) + +### Creating a Cluster from an RKE Template + +To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) using an RKE template, use these steps: + +1. From the **Global** view, go to the **Clusters** tab. +1. Click **Add Cluster** and choose the infrastructure provider. +1. Provide the cluster name and node template details as usual. +1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** +1. Choose an existing template and revision from the dropdown menu. +1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. +1. Click **Save** to launch the cluster. + +### Updating a Cluster Created with an RKE Template + +When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. + +- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) +- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. + +If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. 
+ +As of Rancher v2.3.3, an existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. + +> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +### Converting an Existing Cluster to Use an RKE Template + +_Available as of v2.3.3_ + +This section describes how to create an RKE template from an existing cluster. + +RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) + +To convert an existing cluster to use an RKE template, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that will be converted to use an RKE template. Click **Ellipsis (...)** > **Save as RKE Template.** +1. Enter a name for the template in the form that appears, and click **Create.** + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/_index.md new file mode 100644 index 00000000000..1c0c4711596 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/_index.md @@ -0,0 +1,162 @@ +--- +title: Creating and Revising Templates +weight: 32 +--- + +This section describes how to manage RKE templates and revisions. You can create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** + +Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. + +Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. + +The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default.
+ +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a template](#creating-a-template) +- [Updating a template](#updating-a-template) +- [Deleting a template](#deleting-a-template) +- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) +- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) +- [Disabling a template revision](#disabling-a-template-revision) +- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) +- [Setting a template revision as default](#setting-a-template-revision-as-default) +- [Deleting a template revision](#deleting-a-template-revision) +- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) +- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) + +### Prerequisites + +You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions) + +You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) + +### Creating a Template + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Click **Add Template.** +1. Provide a name for the template. An auto-generated name is already provided for the template's first version, which is created along with this template. +1. Optional: Share the template with other users or groups by [adding them as members.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users) You can also make the template public to share with everyone in the Rancher setup. +1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. + +**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters). + +### Updating a Template + +When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. + +You can't edit individual revisions of a template. To prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) + +When new template revisions are created, clusters using an older revision of the template are unaffected. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to edit and click the **Vertical Ellipsis (...) > Edit.** +1. Edit the required information and click **Save.** +1. Optional: You can change the default revision of this template and also change who it is shared with. + +**Result:** The template is updated.
To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) + +### Deleting a Template + +When you no longer use an RKE template for any of your clusters, you can delete it. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to delete and click the **Vertical Ellipsis (...) > Delete.** +1. Confirm the deletion when prompted. + +**Result:** The template is deleted. + +### Creating a Revision Based on the Default Revision + +You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to clone and click the **Vertical Ellipsis (...) > New Revision From Default.** +1. Complete the rest of the form to create a new revision. + +**Result:** The RKE template revision is cloned and configured. + +### Creating a Revision Based on a Cloned Revision + +When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to clone. Then select **Ellipsis > Clone Revision.** +1. Complete the rest of the form. + +**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. + +### Disabling a Template Revision + +When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. + +You can disable the revision if it is not being used by any cluster. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to disable. Then select **Ellipsis > Disable.** + +**Result:** The RKE template revision cannot be used to create a new cluster. + +### Re-enabling a Disabled Template Revision + +If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to re-enable. Then select **Ellipsis > Enable.** + +**Result:** The RKE template revision can be used to create a new cluster. + +### Setting a Template Revision as Default + +When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. + +To set an RKE template revision as default, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template revision that should be default and click the **Ellipsis (...) > Set as Default.** + +**Result:** The RKE template revision will be used as the default option when clusters are created with the template. + +### Deleting a Template Revision + +You can delete all revisions of a template except for the default revision. + +To permanently delete a revision, + +1. 
From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template revision that should be deleted and click the **Ellipsis (...) > Delete.** + +**Result:** The RKE template revision is deleted. + +### Upgrading a Cluster to Use a New Template Revision + +> This section assumes that you already have a cluster that [has an RKE template applied.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates) +> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. + +To upgrade a cluster to use a new template revision, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that you want to upgrade and click **Ellipsis (...) > Edit.** +1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. +1. Click **Save.** + +**Result:** The cluster is upgraded to use the settings defined in the new template revision. + +### Exporting a Running Cluster to a New RKE Template and Revision + +You can save an existing cluster's settings as an RKE template. + +This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) + +To convert an existing cluster to use an RKE template, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that will be converted to use an RKE template. Click **Ellipsis (...)** > **Save as RKE Template.** +1. Enter a name for the template in the form that appears, and click **Create.** + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template and revision.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/_index.md new file mode 100644 index 00000000000..30b58bebd98 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/_index.md @@ -0,0 +1,50 @@ +--- +title: Template Creator Permissions +weight: 10 +--- + +Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. + +For more information on administrator permissions, refer to the [documentation on global permissions]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/). + +# Giving Users Permission to Create Templates + +Templates can only be created by users who have the global permission **Create RKE Templates.** + +Administrators have the global permission to create templates, and only administrators can give that permission to other users.
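For Rancher installations that are managed declaratively, the same permission can also be granted through Rancher's management CRDs on the cluster where Rancher itself is installed. The following is a rough, illustrative sketch only: the global role ID (`clustertemplates-create`) and the user ID (`u-abc123`) are assumptions that you should verify against your own installation before using anything like this.

```bash
# Illustrative sketch only -- run against the cluster where Rancher itself is installed.
# First confirm the real global role ID and user ID (the values below are assumptions):
kubectl get globalroles.management.cattle.io | grep -i template
kubectl get users.management.cattle.io

# Bind the assumed "Create RKE Templates" global role to an example user.
cat <<'EOF' | kubectl create -f -
apiVersion: management.cattle.io/v3
kind: GlobalRoleBinding
metadata:
  generateName: grb-
globalRoleName: clustertemplates-create   # assumed role ID; verify with the command above
userName: u-abc123                        # placeholder user ID
EOF
```

In most cases the UI steps below are the simpler option; the sketch is only useful if you already manage Rancher resources as code.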
+ +For information on allowing users to modify existing templates, refer to [Sharing Templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) + +Administrators can give users permission to create RKE templates in two ways: + +- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) +- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) + +### Allowing a User to Create Templates + +An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: + +1. From the **Global** view, click the **Users** tab. Choose the user you want to edit and click the **Vertical Ellipsis (...) > Edit.** +1. In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** + +**Result:** The user has permission to create RKE templates. + +### Allowing New Users to Create Templates by Default + +Alternatively, the administrator can give all new users the default permission to create RKE templates by following these steps. This will not affect the permissions of existing users. + +1. From the **Global** view, click **Security > Roles.** +1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **Vertical Ellipsis (...) > Edit**. +1. Select the option **Yes: Default role for new users** and click **Save.** + +**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. + +### Revoking Permission to Create Templates + +Administrators can remove a user's permission to create templates with the following steps: + +1. From the **Global** view, click the **Users** tab. Choose the user you want to edit and click the **Vertical Ellipsis (...) > Edit.** +1. In the **Global Permissions** section, uncheck the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. +1. Click **Save.** + +**Result:** The user cannot create RKE templates. \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md new file mode 100644 index 00000000000..4f686c0222a --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md @@ -0,0 +1,38 @@ +--- +title: Template Enforcement +weight: 32 +--- + +This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. + +By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, + +- Only an administrator has the ability to create clusters without a template. +- All standard users must use an RKE template to create a new cluster; they cannot create a cluster without one. + +Users can only create new templates if the administrator [gives them permission.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/#allowing-a-user-to-create-templates) + +After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template.
The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) + +# Requiring New Clusters to Use an RKE Template + +You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) will use the Kubernetes and/or Rancher settings that are vetted by administrators. + +To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `cluster-template-enforcement` setting. Click the vertical **Ellipsis (...)** and click **Edit.** +1. Set the value to **True** and click **Save.** + +**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. + +# Disabling RKE Template Enforcement + +To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `cluster-template-enforcement` setting. Click the vertical **Ellipsis (...)** and click **Edit.** +1. Set the value to **False** and click **Save.** + +**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/_index.md new file mode 100644 index 00000000000..4e93e102c62 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/_index.md @@ -0,0 +1,71 @@ +--- +title: Example Scenarios +weight: 5 +--- + +These example scenarios describe how an organization could use templates to standardize cluster creation. + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. 
+- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) + + +# Enforcing a Template Setting for Everyone + +Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. + +1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. +1. The administrator makes the template public. +1. The administrator turns on template enforcement. + +**Results:** + +- All Rancher users in the organization have access to the template. +- All new clusters created by [standard users]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) with this template will use Kubernetes 1.14 and cannot use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. +- All standard users must use an RKE template to create a new cluster. They cannot create a cluster without using a template. + +In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. + +# Templates for Basic and Advanced Users + +Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. + +1. First, an administrator turns on [RKE template enforcement.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) in Rancher will need to use an RKE template when they create a cluster. +1. The administrator then creates two templates: + + - One template for basic users, with almost every option specified except for access keys + - One template for advanced users, which has **Allow User Override** turned on for most or all options + +1. The administrator shares the advanced template with only the advanced users. +1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. + +**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. + +# Updating Templates and Clusters Created with Them + +Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. + +In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting.
+ +The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: + +- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. +- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. +- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but upgrades to a newer minor version, such as v1.15, will not be allowed. + +# Allowing Other Users to Control and Share a Template + +Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. + +Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. + +To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) + +The result is that as a template owner, Bob is in charge of version control for that template. Bob can now do all of the following: + +- [Revise the template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) when the best practices change +- [Disable outdated revisions]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#disabling-a-template-revision) of the template so that no new clusters can be created with it +- [Delete the whole template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#deleting-a-template) if the organization wants to go in a different direction +- [Set a certain revision as default]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. +- [Share the template]({{}}/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md new file mode 100644 index 00000000000..3c85e86d616 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md @@ -0,0 +1,112 @@ +--- +title: Example YAML +weight: 60 +--- + +Below is an example RKE template configuration file for reference.
+ +The YAML in the RKE template uses the same customization options that are used when you create an RKE cluster. However, since the YAML is within the context of a Rancher-provisioned RKE cluster, the customization from the RKE docs needs to be nested under the `rancher_kubernetes_engine_config` directive. + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker + +enable_cluster_alerting: false +# This setting is not enforced. Clusters +# created with this sample template +# would have alerting turned off by default, +# but end users could still turn alerting +# on or off. + +enable_cluster_monitoring: true +# This setting is not enforced. Clusters +# created with this sample template +# would have monitoring turned on +# by default, but end users could still +# turn monitoring on or off. + +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/overrides/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/overrides/_index.md new file mode 100644 index 00000000000..bb5f00d4b9e --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/overrides/_index.md @@ -0,0 +1,15 @@ +--- +title: Overriding Template Settings +weight: 33 +--- + +When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting.
This switch marks those settings as **Allow User Override.** + +After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. + +When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. + +The **Allow User Override** model of the RKE template is useful for situations such as: + +- Administrators know that some settings will need the flexibility to be frequently updated over time +- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md new file mode 100644 index 00000000000..67ca181a964 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md @@ -0,0 +1,70 @@ +--- +title: RKE Templates and Infrastructure +weight: 90 +--- + +In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. + +Therefore, even if RKE template enforcement is turned on, the end user still has flexibility in picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. + +If you want to standardize the hardware in your clusters, use RKE templates in conjunction with node templates or with a server provisioning tool such as Terraform. + +### Node Templates + +[Node templates]({{}}/rancher/v2.x/en/user-settings/node-templates) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. + +### Terraform + +Terraform is a server provisioning tool. It uses an infrastructure-as-code approach that lets you define almost every aspect of your infrastructure in Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. + +This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster.
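As a rough illustration of the workflow described above, the sketch below writes a minimal Terraform configuration that declares the Rancher 2 provider and then runs the standard Terraform commands. The API URL and token are placeholders, and the `rancher2_*` resources you would add (node templates, clusters, and so on) depend on your environment; check the Rancher2 provider documentation for the exact resource arguments.

```bash
# Minimal sketch: declare the Rancher 2 provider and initialize Terraform.
# The URL and token are placeholders for your own Rancher server and API key.
cat > main.tf <<'EOF'
provider "rancher2" {
  api_url   = "https://rancher.example.com/v3"   # placeholder Rancher API endpoint
  token_key = "token-xxxxx:yyyyyyyyyyyyyyyy"     # placeholder Rancher API key
}

# Add rancher2_* resources here (node templates, clusters, and so on),
# following the Rancher2 provider documentation linked above.
EOF

terraform init    # downloads the rancher2 provider plugin
terraform plan    # previews the changes before anything is created
# terraform apply # creates the resources once the plan looks correct
```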
+ +Terraform allows you to: + +- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates +- Leverage catalog apps and multi-cluster apps +- Codify infrastructure across many platforms, including Rancher and major cloud providers +- Commit infrastructure-as-code to version control +- Easily repeat configuration and setup of infrastructure +- Incorporate infrastructure changes into standard development practices +- Prevent configuration drift, in which some servers become configured differently than others + +# How Does Terraform Work? + +Terraform configuration is written in files with the extension `.tf`. These files use HashiCorp Configuration Language (HCL), a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. Then Terraform makes API calls to the provider in order to efficiently create that infrastructure. + +To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. + +Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. + +When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. Then, when you run `terraform apply`, the changes are deployed. + +# Tips for Working with Terraform + +- There are examples of how to provision most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) + +- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. + +- You can also modify authentication settings in the Terraform provider. + +- You can reverse engineer how to define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. + +- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. + +# Tip for Creating CIS Benchmark Compliant Clusters + +This section describes one way that you can make security and compliance-related config files standard in your clusters. + +When you create a [CIS benchmark compliant cluster,]({{}}/rancher/v2.x/en/security/) you have an encryption config file and an audit log config file. + +Your infrastructure provisioning system can write those files to disk.
Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. + +Then you would make sure that the `kube-api-server` flag in your RKE template uses your CIS-compliant config files. + +In this way, you can create flags that comply with the CIS benchmark. + +# Resources + +- [Terraform documentation](https://www.terraform.io/docs/) +- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) +- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/_index.md new file mode 100644 index 00000000000..a86d8219a85 --- /dev/null +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/_index.md @@ -0,0 +1,61 @@ +--- +title: Access and Sharing +weight: 31 +--- + +If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. + +Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. + +When you share a template, each user can have one of two access levels: + +- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. +- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. + +If you create a template, you automatically become an owner of that template. + +If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.]({{}}/rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising) + +There are several ways to share templates: + +- Add users to a new RKE template during template creation +- Add users to an existing RKE template +- Make the RKE template public, sharing it with all users in the Rancher setup +- Share template ownership with users who are trusted to modify the template + +### Sharing Templates with Specific Users or Groups + +To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to share and click the **Vertical Ellipsis (...) > Edit.** +1. In the **Share Template** section, click on **Add Member**. +1. Search in the **Name** field for the user or group you want to share the template with. +1. Choose the **User** access type. +1. Click **Save.** + +**Result:** The user or group can create clusters using the template. + +### Sharing Templates with All Users + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to share and click the **Vertical Ellipsis (...) > Edit.** +1. 
Under **Share Template,** click **Make Public (read-only).** Then click **Save.** + +**Result:** All users in the Rancher setup can create clusters using the template. + +### Sharing Ownership of Templates + +If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. + +In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. + +To give Owner access to a user or group, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to share and click the **Vertical Ellipsis (...) > Edit.** +1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. +1. In the **Access Type** field, click **Owner.** +1. Click **Save.** + +**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/content/rancher/v2.x/en/api/_index.md b/content/rancher/v2.x/en/api/_index.md index 084287dc540..97a0c5a6489 100644 --- a/content/rancher/v2.x/en/api/_index.md +++ b/content/rancher/v2.x/en/api/_index.md @@ -11,6 +11,8 @@ The API has its own user interface accessible from a web browser. This is an ea API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.x/en/api/api-tokens). + ## Making requests The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). diff --git a/content/rancher/v2.x/en/api/api-tokens/_index.md b/content/rancher/v2.x/en/api/api-tokens/_index.md new file mode 100644 index 00000000000..eb823bb7cea --- /dev/null +++ b/content/rancher/v2.x/en/api/api-tokens/_index.md @@ -0,0 +1,29 @@ +--- +title: API Tokens +weight: 1 +--- + +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. Tokens are not invalidated by changing a password. + +You can deactivate API tokens by deleting them or by deactivating the user account. + +To delete a token, + +1. Go to the list of all tokens in the Rancher API view at `https:///v3/tokens`. + +1. Access the token you want to delete by its ID. For example, `https:///v3/tokens/kubectl-shell-user-vqkqt` + +1. 
Click **Delete.** + +Here is the complete list of tokens that are generated with `ttl=0`: + +| Token | Description | +|-------|-------------| +| `kubeconfig-*` | Kubeconfig token | +| `kubectl-shell-*` | Access to `kubectl` shell in the browser | +| `agent-*` | Token for agent deployment | +| `compose-token-*` | Token for compose | +| `helm-token-*` | Token for Helm chart deployment | +| `*-pipeline*` | Pipeline token for project | +| `telemetry-*` | Telemetry token | +| `drain-node-*` | Token for drain (we use `kubectl` for drain because there is no native Kubernetes API) | diff --git a/content/rancher/v2.x/en/backups/backups/_index.md b/content/rancher/v2.x/en/backups/backups/_index.md index aaf8226cd97..9ef3beb47d8 100644 --- a/content/rancher/v2.x/en/backups/backups/_index.md +++ b/content/rancher/v2.x/en/backups/backups/_index.md @@ -7,7 +7,7 @@ aliases: --- This section contains information about how to create backups of your Rancher data and how to restore them in a disaster scenario. -- [Single Node Install Backups](./single-node-backups/) -- [High Availability Install Backups](./ha-backups/) +- [Docker Install Backups](./single-node-backups/) +- [Kubernetes Install Backups](./ha-backups/) If you are looking to back up your [Rancher launched Kubernetes cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/backing-up-etcd/). diff --git a/content/rancher/v2.x/en/backups/backups/ha-backups/_index.md b/content/rancher/v2.x/en/backups/backups/ha-backups/_index.md index aa388b7b843..08fce60c0b2 100644 --- a/content/rancher/v2.x/en/backups/backups/ha-backups/_index.md +++ b/content/rancher/v2.x/en/backups/backups/ha-backups/_index.md @@ -1,8 +1,8 @@ --- -title: Creating Backups—High Availability Installs +title: Creating Backups for Rancher Installed on Kubernetes weight: 50 aliases: - - /rancher/v2.x/en/installation/after-installation/ha-backup-and-restoration/ + - /rancher/v2.x/en/installation/after-installation/k8s-install-backup-and-restoration/ - /rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration/ --- This section describes how to create backups of your high-availability Rancher install. @@ -61,6 +61,7 @@ To take recurring snapshots, enable the `etcd-snapshot` service, which is a serv access_key: "myaccesskey" secret_key: "myaccesssecret" bucket_name: "my-backup-bucket" + folder: "folder-name" # Available as of v2.3.0 endpoint: "s3.eu-west-1.amazonaws.com" region: "eu-west-1" ``` @@ -112,7 +113,8 @@ _Available as of RKE v0.2.0_ ```shell rke etcd snapshot-save --config rancher-cluster.yml --name snapshot-name \ --s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ - --bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com + --bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com \ + --folder folder-name # Available as of v2.3.0 ``` **Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. It is also uploaded to the S3 compatible backend. 
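If you want to confirm that a snapshot was uploaded, one option (assuming the AWS CLI is installed and configured with credentials for the same bucket) is to list the folder used in the example command above:

```bash
# List the uploaded etcd snapshots, using the example bucket and folder names from above.
# Assumes the AWS CLI is configured with credentials that can read the bucket.
aws s3 ls s3://s3-bucket-name/folder-name/
```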
diff --git a/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md b/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md index 1ac352d5a3c..e86291f230f 100644 --- a/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md +++ b/content/rancher/v2.x/en/backups/backups/single-node-backups/_index.md @@ -1,19 +1,18 @@ --- -title: Creating Backups—Single Node Installs +title: Creating Backups for Rancher Installed with Docker weight: 25 aliases: - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/ --- -After completing your single node installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. +After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. ## Before You Start During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: ``` -docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher +docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher ``` In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. @@ -54,7 +53,7 @@ This procedure creates a backup that you can restore if Rancher encounters a dis 1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each [placeholder](#before-you-start). ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher ``` **Step Result:** A stream of commands runs on the screen. @@ -69,4 +68,4 @@ This procedure creates a backup that you can restore if Rancher encounters a dis docker start ``` -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Single Node Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/content/rancher/v2.x/en/backups/restorations/_index.md b/content/rancher/v2.x/en/backups/restorations/_index.md index 7dc3294f1bd..52fd8cab149 100644 --- a/content/rancher/v2.x/en/backups/restorations/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/_index.md @@ -4,7 +4,7 @@ weight: 1010 --- If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location. 
-- [Restoring Backups—Single Node Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/single-node-restoration/) -- [Restoring Backups—High Availability Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/ha-restoration/) +- [Restoring Backups—Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/single-node-restoration/) +- [Restoring Backups—Kubernetes installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/ha-restoration/) If you are looking to restore your [Rancher launched Kubernetes cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), please refer [here]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md b/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md index f51bca614a3..ac30f5113c5 100644 --- a/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md @@ -1,5 +1,6 @@ --- -title: Restoring Backups—High Availability Installs +title: Restoring Backups—Kubernetes installs +shortTitle: Kubernetes Installs weight: 370 aliases: - /rancher/v2.x/en/installation/after-installation/ha-backup-and-restoration/ @@ -25,7 +26,7 @@ Additionally, the `pki.bundle.tar.gz` file usage is no longer required as v0.2.0 You will need [RKE]({{< baseurl >}}/rke/latest/en/installation/) and [kubectl]({{< baseurl >}}/rancher/v2.x/en/faq/kubectl/) CLI utilities installed. -Prepare by creating 3 new nodes to be the target for the restored Rancher instance. See [HA Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/) for node requirements. +Prepare by creating 3 new nodes to be the target for the restored Rancher instance. See [Kubernetes Install]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/) for node requirements. We recommend that you start with fresh nodes and a clean state. Alternatively you can clear Kubernetes and Rancher configurations from the existing nodes. This will destroy the data on these nodes. See [Node Cleanup]({{< baseurl >}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) for the procedure. @@ -115,7 +116,8 @@ When restoring etcd from a snapshot located in an S3 compatible backend, the com ``` $ rke etcd snapshot-restore --config cluster.yml --name snapshot-name \ --s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com +--bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com \ +--folder folder-name # Available as of v2.3.0 ``` #### Options for `rke etcd snapshot-restore` @@ -131,6 +133,7 @@ S3 specific options are only available for RKE v0.2.0+. | `--access-key` value | Specify s3 accessKey | *| | `--secret-key` value | Specify s3 secretKey | *| | `--bucket-name` value | Specify s3 bucket name | *| +| `--folder` value | Specify s3 folder in the bucket name _Available as of v2.3.0_ | *| | `--region` value | Specify the s3 bucket location (optional) | *| | `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | | `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | @@ -229,6 +232,6 @@ rke up --config ./rancher-cluster-restore.yml #### Finishing Up -Rancher should now be running and available to manage your Kubernetes clusters. 
Swap your Rancher DNS or Load Balancer endpoints to target the new cluster. Once this is done the agents on your managed clusters should automatically reconnect. This may take 10-15 minutes due to reconnect back off timeouts. +Rancher should now be running and available to manage your Kubernetes clusters. Review the [recommended architecture]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/#recommended-architecture) for Kubernetes installations and update the endpoints for Rancher DNS or the Load Balancer that you built during Step 1 of the Kubernetes install ([1. Create Nodes and Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/#load-balancer)) to target the new cluster. Once the endpoints are updated, the agents on your managed clusters should automatically reconnect. This may take 10-15 minutes due to reconnect back off timeouts. > **IMPORTANT:** Remember to save your new RKE config (`rancher-cluster-restore.yml`) and `kubectl` credentials (`kube_config_rancher-cluster-restore.yml`) files in a safe place for future maintenance. diff --git a/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md b/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md index 00ecf9e2196..9034877c2e4 100644 --- a/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/single-node-restoration/_index.md @@ -1,6 +1,6 @@ --- -title: Restoring Backups—Single Node Installs -shortTitle: Singe Node Installs +title: Restoring Backups—Docker Installs +shortTitle: Docker Installs weight: 365 aliases: - /rancher/v2.x/en/installation/after-installation/single-node-backup-and-restoration/ @@ -15,7 +15,7 @@ During restoration of your backup, you'll enter a series of commands, filling pl ``` docker run --volumes-from -v $PWD:/backup \ busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar zxvf /backup/rancher-data-backup--" +tar pzxvf /backup/rancher-data-backup--" ``` In this command, `` and `-` are environment variables for your Rancher deployment. @@ -46,9 +46,9 @@ Using a [backup]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-bac ``` docker stop ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Single Node Installs]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - If you followed the naming convention we suggested in [Creating Backups—Single Node Installs]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. + If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. 1. Enter the following command to delete your current state data and replace it with your backup data, replacing the [placeholders](#before-you-start). Don't forget to close the quotes. 
@@ -57,7 +57,7 @@ Using a [backup]({{< baseurl >}}/rancher/v2.x/en/backups/backups/single-node-bac ``` docker run --volumes-from -v $PWD:/backup \ busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar zxvf /backup/rancher-data-backup--.tar.gz" + tar pzxvf /backup/rancher-data-backup--.tar.gz" ``` **Step Result:** A series of commands should run. diff --git a/content/rancher/v2.x/en/best-practices/containers/_index.md b/content/rancher/v2.x/en/best-practices/containers/_index.md index c526e79b73a..83f1cc182ec 100644 --- a/content/rancher/v2.x/en/best-practices/containers/_index.md +++ b/content/rancher/v2.x/en/best-practices/containers/_index.md @@ -17,8 +17,6 @@ Smaller distributions such as Alpine and BusyBox reduce container image size and Popular distributions such as Ubuntu, Fedora, and CentOS are more field-tested and offer more functionality. -Another option is RancherOS, an operating system composed entirely of Docker containers. Everything in RancherOS is a container managed by Docker. This includes system services such as udev and syslog. RancherOS includes only the bare minimum amount of software needed to run Docker, decreasing complexity and boot time. The small code base and decreased attack surface of RancherOS also improves security. For details, you can refer to the [RancherOS docs]({{< baseurl >}}/os/v1.x/en/). - ### Start with a FROM scratch container If your microservice is a standalone static binary, you should use a FROM scratch container. @@ -32,11 +30,11 @@ When possible, use a non-privileged user when running processes within your cont ### Define Resource Limits Apply CPU and memory limits to your pods. This can help manage the resources on your worker nodes and avoid a malfunctioning microservice from impacting other microservices. -In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the [Rancher docs]({{}}rancher/v2.x/en/project-admin/resource-quotas/). +In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the Rancher docs. When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project or namespace, all containers will require a respective CPU or Memory field set during creation. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. -The Kubernetes docs have more information on how resource limits can be set at the [container level](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) and the [namespace level](https://kubernetes.io/docs/concepts/policy/resource-quotas/). +The Kubernetes docs have more information on how resource limits can be set at the [container level](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) and the namespace level. ### Define Resource Requirements You should apply CPU and memory requirements to your pods. This is crucial for informing the scheduler which type of compute node your pod needs to be placed on, and ensuring it does not over-provision that node. 
In Kubernetes, you can set a resource requirement by defining `resources.requests` in the resource requests field in a pod's container spec. For details, refer to the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). diff --git a/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md b/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md index 9a8a6bf5b19..cd6d01bb1c4 100644 --- a/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md +++ b/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md @@ -11,9 +11,9 @@ There are two recommended deployment strategies. Each one has its own pros and c # Hub & Spoke Strategy --- -In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run in an HA (high-availability) configuration, and there would be impact due to latencies. +In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. -![Hub and Spoke Deployment]({{< baseurl >}}/img/rancher/bpg/hub-and-spoke.png) +{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} ### Pros @@ -30,7 +30,7 @@ In this deployment scenario, there is a single Rancher control plane managing Ku --- In the regional deployment model a control plane is deployed in close proximity to the compute nodes. -![Regional Deployment]({{< baseurl >}}/img/rancher/bpg/regional.png) +{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} ### Pros diff --git a/content/rancher/v2.x/en/best-practices/deployment-types/_index.md b/content/rancher/v2.x/en/best-practices/deployment-types/_index.md index c324c613241..82d177cbcaf 100644 --- a/content/rancher/v2.x/en/best-practices/deployment-types/_index.md +++ b/content/rancher/v2.x/en/best-practices/deployment-types/_index.md @@ -3,26 +3,32 @@ title: Tips for Running Rancher weight: 100 --- -A high-availability (HA) installation, defined as an installation of at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. +A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. When you set up your high-availability Rancher installation, consider the following: ### Run Rancher on a Separate Cluster -Don't run other workloads or microservices in your Rancher HA cluster. +Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. ### Don't Run Rancher on a Hosted Kubernetes Environment -Don't run Rancher HA in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. 
+When the Rancher server is installed on a Kubernetes cluster, it should not be run in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE]({{}}/rke/latest/en/etcd-snapshots/) or [Rancher]({{}}/rancher/v2.x/en/backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. +### Make sure nodes are configured correctly for Kubernetes ### +It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md) + +### When using RKE: Backup the Statefile +RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. + ### Run All Nodes in the Cluster in the Same Datacenter For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. ### Development and Production Environments Should be Similar -It's strongly recommended to have a "staging" or "pre-production" environment of your Rancher HA cluster mirrors your production environment as closely as possible in terms of software and hardware configuration. +It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. ### Monitor Your Clusters to Plan Capacity -You should run Rancher HA within the [system and hardware requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. +The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. 
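As a rough sketch of the etcd snapshot recommendation above, recurring snapshots can be enabled in an RKE `cluster.yml` under the `etcd` service; the interval and retention values below are illustrative only, and the exact fields should be checked against the RKE documentation for your version.

```yaml
services:
  etcd:
    backup_config:
      enabled: true          # turn on recurring etcd snapshots
      interval_hours: 12     # how often a snapshot is taken
      retention: 6           # how many snapshots to keep before rotation
```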
diff --git a/content/rancher/v2.x/en/best-practices/management/_index.md b/content/rancher/v2.x/en/best-practices/management/_index.md index 70168c3e2ad..fe7f5f75bf4 100644 --- a/content/rancher/v2.x/en/best-practices/management/_index.md +++ b/content/rancher/v2.x/en/best-practices/management/_index.md @@ -15,7 +15,7 @@ Rancher is container-based and can potentially run on any Linux-based operating ### Upgrade Your Kubernetes Version Keep your Kubernetes cluster up to date with a recent and supported version. Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if a security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). -Rancher’s SLA’s are not community dependent, but as Kubernetes is a community-driven software, the quality of experience will degrade as you get farther away from the community's supported target. +Rancher’s SLAs are not community dependent, but as Kubernetes is a community-driven software, the quality of experience will degrade as you get farther away from the community's supported target. ### Kill Pods Randomly During Testing Run chaoskube or a similar mechanism to randomly kill pods in your test environment. This will test the resiliency of your infrastructure and the ability of Kubernetes to self-heal. It's not recommended to run this in your production environment. @@ -26,7 +26,7 @@ Rancher's "Add Cluster" UI is preferable for getting started with Kubernetes clu Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2-terraform-provider/) for working with Rancher 2.0 Kubernetes. It is called the [Rancher2 Provider.](https://www.terraform.io/docs/providers/rancher2/index.html) ### Upgrade Rancher in a Staging Environment -All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. +All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. ### Renew Certificates Before they Expire Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/) to track certificate expiration. @@ -48,7 +48,7 @@ When installing or upgrading a non-production environment to an early release, a Make sure the feature version you are upgrading to is considered "stable" as determined by Rancher. Use the beta, release candidate, and "latest" versions in a testing, development, or demo environment to try out new features. Feature version upgrades, for example 2.1.x to 2.2.x, should be considered as and when they are released. Some bug fixes and most features are not back ported into older versions. 
-Keep in mind that Rancher does End of Life support for old versions, so you will eventually want to upgrade if you want to continue to receive patches. +Keep in mind that Rancher does End of Life support for old versions, so you will eventually want to upgrade if you want to continue to receive patches. For more detail on what happens during the Rancher product lifecycle, refer to the [Support Maintenance Terms](https://rancher.com/support-maintenance-terms/). @@ -99,7 +99,7 @@ In addition to Rancher software updates, closely monitor security fixes for rela # Tips for Multi-Tenant Clusters ### Namespaces -Each tenant should have their own unique namespaces within the cluster. This avoids naming conflicts and allows resources to be only visible to their owner through use of RBAC policy +Each tenant should have their own unique namespaces within the cluster. This avoids naming conflicts and allows resources to be only visible to their owner through use of RBAC policy ### Project Isolation Use Rancher's Project Isolation to automatically generate Network Policy between Projects (sets of Namespaces). This further protects workloads from interference @@ -108,18 +108,18 @@ Use Rancher's Project Isolation to automatically generate Network Policy between Enforce use of sane resource limit definitions for every deployment in your cluster. This not only protects the owners of the deployment, but the neighboring resources from other tenants as well. Remember, namespaces do not isolate at the node level, so over-consumption of resources on a node affects other namespace deployments. Admission controllers can be written to require resource limit definitions ### Resource Requirements -Enforce use of resource requirement definitions for each deployment in your cluster. This enables the scheduler to appropriately schedule workloads. Otherwise you will eventually end up with over-provisioned nodes. +Enforce use of resource requirement definitions for each deployment in your cluster. This enables the scheduler to appropriately schedule workloads. Otherwise you will eventually end up with over-provisioned nodes. # Class of Service and Kubernetes Clusters A class of service describes the expectations around cluster uptime, durability, and duration of maintenance windows. Typically organizations group these characteristics into labels such as "dev" or "prod" ### Consider fault domains -Kubernetes clusters can span multiple classes of service, however it is important to consider the ability for one workload to affect another. Without proper deployment practices such as resource limits, requirements, etc, a deployment that is not behaving well has the potential to impact the health of the cluster. In a "dev" environment it is common for end-users to exercise less caution with deployments, thus increasing the chance of such behavior. Sharing this behavior with your production workload increases risk. +Kubernetes clusters can span multiple classes of service, however it is important to consider the ability for one workload to affect another. Without proper deployment practices such as resource limits, requirements, etc, a deployment that is not behaving well has the potential to impact the health of the cluster. In a "dev" environment it is common for end-users to exercise less caution with deployments, thus increasing the chance of such behavior. Sharing this behavior with your production workload increases risk. 
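For the multi-tenant resource limit and requirement guidance above, a per-namespace default can be enforced with a standard Kubernetes `LimitRange`, so containers that omit their own values still receive sane defaults; the namespace name and values in this sketch are illustrative.

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: default-container-limits
  namespace: tenant-a          # illustrative tenant namespace
spec:
  limits:
  - type: Container
    defaultRequest:            # applied when a container omits resource requests
      cpu: 100m
      memory: 128Mi
    default:                   # applied when a container omits resource limits
      cpu: 500m
      memory: 256Mi
```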
### Upgrade risks -Upgrades of Kuberentes are not without risk, the best way to predict the outcome of an upgrade is try it on a cluster of similar load and use case as your production cluster. This is where having non-prod class of service clusters can be advantageous. +Upgrades of Kubernetes are not without risk, the best way to predict the outcome of an upgrade is try it on a cluster of similar load and use case as your production cluster. This is where having non-prod class of service clusters can be advantageous. -### Resource Efficiency +### Resource Efficiency Clusters can be built with varying degrees of redundancy. In a class of service with low expectations for uptime, resources and cost can be conserved by building clusters without redundant Kubernetes control components. This approach may also free up more budget/resources to increase the redundancy at the production level # Network Security diff --git a/content/rancher/v2.x/en/catalog/_index.md b/content/rancher/v2.x/en/catalog/_index.md index ec079814f6c..b0874d88278 100644 --- a/content/rancher/v2.x/en/catalog/_index.md +++ b/content/rancher/v2.x/en/catalog/_index.md @@ -1,5 +1,6 @@ --- -title: Catalogs and Apps +title: Catalogs, Helm Charts and Apps +description: Rancher enables the use of catalogs to repeatedly deploy applications easily. Catalogs are GitHub or Helm Chart repositories filled with deployment-ready apps. weight: 4000 aliases: - /rancher/v2.x/en/concepts/global-configuration/catalog/ @@ -7,29 +8,38 @@ aliases: - /rancher/v2.x/en/tasks/global-configuration/catalog/ --- -## Catalogs - Rancher provides the ability to use a catalog of Helm charts that make it easy to repeatedly deploy applications. -_Catalogs_ are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. - ->A collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on. +- **Catalogs** are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. +- **Helm charts** are a collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on. Rancher improves on Helm catalogs and charts. All native Helm charts can work within Rancher, but Rancher adds several enhancements to improve their user experience. -## Catalog Scopes +This section covers the following topics: -Catalogs can be added at different scopes of Rancher. 
+- [Catalog scopes](#catalog-scopes) +- [Enabling built-in global catalogs](#enabling-built-in-global-catalogs) +- [Adding custom global catalogs](#adding-custom-global-catalogs) + - [Add custom Git repositories](#add-custom-git-repositories) + - [Add custom Helm chart repositories](#add-custom-helm-chart-repositories) + - [Add private Git/Helm chart repositories](#add-private-git-helm-chart-repositories) +- [Launching catalog applications](#launching-catalog-applications) +- [Working with catalogs](#working-with-catalogs) + - [Apps](#apps) + - [Global DNS](#global-dns) + - [Chart compatibility with Rancher](#chart-compatibility-with-rancher) -Scope | Description ---- | --- -Global | Catalogs added at this scope are available for all clusters and all projects in Rancher. -Cluster | Catalogs added within a cluster are available for all projects in that cluster. -Project | Catalogs added within a project are only available for that project. +# Catalog Scopes -## Global catalogs +Within Rancher, you can manage catalogs at three different scopes. Global catalogs are shared across all clusters and project. There are some use cases where you might not want to share catalogs across between different clusters or even projects in the same cluster. By leveraging cluster and project scoped catalogs, you will be able to provide applications for specific teams without needing to share them with all clusters and/or projects. -## Enabling Built-in Catalogs +Scope | Description | Available As of | +--- | --- | --- | +Global | All clusters and all projects can access the Helm charts in this catalog | v2.0.0 | +Cluster | All projects in the specific cluster can access the Helm charts in this catalog | v2.2.0 | +Project | This specific cluster can access the Helm charts in this catalog | v2.2.0 | + +# Enabling Built-in Global Catalogs Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. @@ -53,15 +63,14 @@ Within Rancher, there are default catalogs packaged as part of Rancher. These ca **Result**: The chosen catalogs are enabled. Wait a few minutes for Rancher to replicate the catalog charts. When replication completes, you'll be able to see them in any of your projects by selecting **Apps** from the main navigation bar. In versions prior to v2.2.0, you can select **Catalog Apps** from the main navigation bar. -## Adding Custom Catalogs +# Adding Custom Global Catalogs Adding a catalog is as simple as adding a catalog name, a URL and a branch name. -#### Add Custom Git Repositories +### Add Custom Git Repositories The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will use the `master` branch by default. Whenever you add a catalog to Rancher, it will be available immediately. - -#### Add Custom Helm Chart Repositories +### Add Custom Helm Chart Repositories A Helm chart repository is an HTTP server that houses one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. @@ -69,7 +78,7 @@ Helm comes with built-in package server for developer testing (helm serve). The In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. 
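To illustrate what such a repository serves, below is a hedged sketch of a minimal `index.yaml`; the chart name, URL, and versions are placeholders, and in practice the index is typically generated with `helm repo index` rather than written by hand.

```yaml
apiVersion: v1
entries:
  example-chart:                  # illustrative chart name
  - name: example-chart
    version: 0.1.0
    appVersion: "1.0"
    description: Placeholder entry describing one packaged chart
    urls:
    - https://charts.example.com/example-chart-0.1.0.tgz
generated: "2019-01-01T00:00:00Z"
```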
-#### Add Private Git/Helm Chart Repositories +### Add Private Git/Helm Chart Repositories _Available as of v2.2.0_ In Rancher v2.2.0, you can add private catalog repositories using credentials like Username and Password. You may also want to use the @@ -90,7 +99,7 @@ NEEDS TO BE FIXED FOR 2.0: Any [users]({{site.baseurl}}/rancher/{{page.version}} **Result**: Your catalog is added to Rancher. -## Launching Catalog Applications +# Launching Catalog Applications After you've either enabled the built-in catalogs or added your own custom catalog, you can start launching any catalog application.> @@ -111,7 +120,7 @@ After you've either enabled the built-in catalogs or added your own custom catal * For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs), answers are provided as key value pairs in the **Answers** section. * Keys and values are available within **Detailed Descriptions**. - * When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://github.com/helm/helm/blob/master/docs/using_helm.md#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. + * When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of-set), as Rancher passes them as `--set` flags to Helm. For example, when entering an answer that includes two values separated by a comma (i.e., `abc, bcd`), wrap the values with double quotes (i.e., `"abc, bcd"`). @@ -121,34 +130,28 @@ After you've either enabled the built-in catalogs or added your own custom catal By creating a customized repository with added files, Rancher improves on Helm repositories and charts. All native Helm charts can work within Rancher, but Rancher adds several enhancements to improve their user experience. -### Catalog Scope - -Within Rancher, you can manage catalogs at three different scopes. Global catalogs is shared across all clusters and project. There are some use cases where you might not want to share catalogs across between different clusters or even projects in the same cluster. By leveraging cluster and project scoped catalogs, you will be able to provide applications for specific teams without needing to share them with all clusters and/or projects. - -Scope | Description | Available As of | ---- | --- | --- | -Global | All clusters and all projects can access the Helm charts in this catalog | v2.0.0 | -Cluster | All projects in the specific cluster can access the Helm charts in this catalog | v2.2.0 | -Project | This specific cluster can access the Helm charts in this catalog | v2.2.0 | - -### Working with catalogs +# Working with Catalogs There are two types of catalogs in Rancher. Learn more about each type: * [Built-in Global Catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/built-in/) * [Custom Catalogs]({{< baseurl >}}/rancher/v2.x/en/catalog/custom/) -## Apps +### Apps In Rancher, applications are deployed from the templates in a catalog. 
Rancher supports two types of applications: * [Multi-cluster applications]({{< baseurl >}}/rancher/v2.x/en/catalog/multi-cluster-apps/) * [Applications deployed in a specific Project]({{< baseurl >}}/rancher/v2.x/en/catalog/apps) -## Global DNS +### Global DNS _Available as v2.2.0_ When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. -For more information on how to use this feature, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/admin-settings/globaldns/). +For more information on how to use this feature, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). + +### Chart Compatibility with Rancher + +Charts now support the fields `rancher_min_version` and `rancher_max_version` in the [`questions.yml` file](https://github.com/rancher/integration-test-charts/blob/master/charts/chartmuseum/v1.6.0/questions.yml) to specify the versions of Rancher that the chart is compatible with. When using the UI, only app versions that are valid for the version of Rancher running will be shown. API validation is done to ensure apps that don't meet the Rancher requirements cannot be launched. An app that is already running will not be affected on a Rancher upgrade if the newer Rancher version does not meet the app's requirements. diff --git a/content/rancher/v2.x/en/catalog/custom/creating/_index.md b/content/rancher/v2.x/en/catalog/custom/creating/_index.md index 582cdbce5c0..bc1ed5e919d 100644 --- a/content/rancher/v2.x/en/catalog/custom/creating/_index.md +++ b/content/rancher/v2.x/en/catalog/custom/creating/_index.md @@ -1,11 +1,11 @@ --- -title: Creating Custom Catalogs +title: Creating Custom Catalogs Apps weight: 4000 aliases: - /rancher/v2.x/en/tasks/global-configuration/catalog/customizing-charts/ --- -Rancher's catalog service requires any custom catalogs to be structured in a specific format for the catalog service to be able to leverage it in Rancher. +Rancher's catalog service requires any custom catalogs to be structured in a specific format for the catalog service to be able to leverage it in Rancher. ## Chart Types @@ -73,16 +73,33 @@ Before you create your own custom catalog, you should have a basic understanding ![questions.yml]({{< baseurl >}}/img/rancher/questions.png) -### Question Variable Reference +### Questions.yml -This reference contains variables that you can use in `questions.yml`. +Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. + +#### Min/Max Rancher versions + +_Available as of v2.3.0_ + +For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. + +> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. + +``` +rancher_min_version: 2.3.0 +rancher_max_version: 2.3.99 +``` + +#### Question Variable Reference + +This reference contains variables that you can use in `questions.yml` nested under `questions:`. 
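Putting the version constraints and question fields together, a hedged sketch of a small `questions.yml` might look like the following; the variable names and values are illustrative, and the individual per-question fields are documented in the reference table below.

```yaml
rancher_min_version: 2.3.0
rancher_max_version: 2.3.99
questions:
- variable: replicaCount          # maps to replicaCount in values.yaml
  label: Replica Count
  description: Number of application replicas to run
  type: int
  required: true
  default: "1"
  group: General
- variable: adminPassword
  label: Admin Password
  type: password
  required: true
```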
| Variable | Type | Required | Description | | ------------- | ------------- | --- |------------- | | variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | | label | string | true | Define the UI label. | | description | string | false | Specify the description of the variable.| -| type | string | false | Default to `string` if not specified (current supported types are string, boolean, int, enum, password, storageclass and hostname).| +| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| | required | bool | false | Define if the variable is required or not (true \| false)| | default | string | false | Specify the default value. | | group | string | false | Group questions by input value. | @@ -104,7 +121,7 @@ This reference contains variables that you can use in `questions.yml`. You can fill your custom catalogs with either Helm Charts or Rancher Charts, although we recommend Rancher Charts due to their enhanced user experience. ->**Note:** For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://docs.helm.sh/developing_charts/). +>**Note:** For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://helm.sh/docs/chart_template_guide/). 1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in [Chart Directory Structure](#chart-directory-structure). diff --git a/content/rancher/v2.x/en/catalog/globaldns/_index.md b/content/rancher/v2.x/en/catalog/globaldns/_index.md index 21a7b84a57d..ffa841ae509 100644 --- a/content/rancher/v2.x/en/catalog/globaldns/_index.md +++ b/content/rancher/v2.x/en/catalog/globaldns/_index.md @@ -7,7 +7,7 @@ _Available as of v2.2.0_ Rancher's Global DNS feature provides a way to program an external DNS provider to route traffic to your Kubernetes applications. Since the DNS programming supports spanning applications across different Kubernetes clusters, Global DNS is configured at a global level. An application can become highly available as it allows you to have one application run on different Kubernetes clusters. If one of your Kubernetes clusters goes down, the application would still be accessible. -> **Note:** Global DNS is only available in [HA setups]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) with the [`local` cluster enabled]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#import-local-cluster). +> **Note:** Global DNS is only available in [Kubernetes installations]({{}}/rancher/v2.x/en/installation/k8s-install/) with the [`local` cluster enabled]({{}}/rancher/v2.x/en/installation/options/chart-options/#import-local-cluster). ## Global DNS Providers @@ -35,7 +35,7 @@ By default, only [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-s 1. From the **Global View**, select **Tools > Global DNS Providers**. 1. To add a provider, choose from the available provider options and configure the Global DNS Provider with necessary credentials and an optional domain. -1. (Optional) Add additional users so they could use the provider when creating Globel DNS entries as well as manage the Global DNS provider. +1. (Optional) Add additional users so they could use the provider when creating Global DNS entries as well as manage the Global DNS provider. 
{{% accordion id="route53" label="Route53" %}} 1. Enter a **Name** for the provider. @@ -63,7 +63,7 @@ By default, only [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-s >**Notes:** > ->- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running [`local` cluster]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#import-local-cluster), and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. +>- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running [`local` cluster]({{< baseurl >}}/rancher/v2.x/en/installation/options/chart-options/#import-local-cluster), and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. >- Different versions of AliDNS have different allowable TTL range, where the default TTL for a global DNS entry may not be valid. Please see the [reference](https://www.alibabacloud.com/help/doc-detail/34338.htm) before adding an AliDNS entry. {{% /accordion %}} @@ -81,7 +81,7 @@ By default, only [global administrators]({{< baseurl >}}/rancher/v2.x/en/admin-s In order for Global DNS entries to be programmed, you will need to add a specific annotation on an ingress in your application or target project and this ingress needs to use a specific `hostname` and an annotation that should match the FQDN of the Global DNS entry. -1. For any application that you want targetted for your Global DNS entry, find an ingress associated with the application. +1. For any application that you want targeted for your Global DNS entry, find an ingress associated with the application. 1. In order for the DNS to be programmed, the following requirements must be met: * The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. * The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. diff --git a/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md b/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md index 62936ccb55a..f40b0e593c7 100644 --- a/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md +++ b/content/rancher/v2.x/en/catalog/multi-cluster-apps/_index.md @@ -18,7 +18,7 @@ After creating a multi-cluster application, you can program a [Global DNS entry] 3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. -4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. +4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. 5. Select a **Template Version**. @@ -50,13 +50,13 @@ In the **Upgrades** section, select the upgrade strategy to use, when you decide #### Roles -In the **Roles** section, you define the role of the multi-cluster application. 
Typically, when a user [launches catalog applications]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/#launching-catalog-applications), that specific users's permissions are used for creation of all workloads/resources that is required by the app. +In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/#launching-catalog-applications), that specific user's permissions are used for creation of all workloads/resources that is required by the app. For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. -- **Project** - This is the equivalent of a [project member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. +- **Project** - This is the equivalent of a [project member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. - **Cluster** - This is the equivalent of a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. 
@@ -68,7 +68,7 @@ When launching the application, Rancher will confirm if you have these permissio For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://github.com/helm/helm/blob/master/docs/using_helm.md#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is reuired to wrap the values with double quotes (i.e., ``"abc, bcd"``). +> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). #### Using a `questions.yml` file @@ -84,7 +84,7 @@ By default, multi-cluster applications can only be managed by the user who creat 1. Find the user that you want to add by typing in the member's name in the **Member** search box. -2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. +2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#configuration-options-to-make-a-multi-cluster-app), the [application specific configuration options](#application-configuration-options), the [members who can interact with the multi-cluster application](#members) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. @@ -108,7 +108,7 @@ The ability to use the same configuration to deploy the same application across - **Answer**: Enter the answer that you want to be used instead. -## Upgrading Multi-Cluster App Roles and Projects +## Upgrading Multi-Cluster App Roles and Projects - **Changing Roles on an existing Multi-Cluster app** The creator and any users added with the access-type "owner" to a multi-cluster app, can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. @@ -120,7 +120,7 @@ The creator and any users added with the access-type "owner" to a multi-cluster ## Multi-Cluster Application Management -One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of manangement.Multi-cluster applications can be cloned, upgraded or rolled back. 
+One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back. 1. From the **Global** view, choose **Apps** in the navigation bar. diff --git a/content/rancher/v2.x/en/cli/_index.md b/content/rancher/v2.x/en/cli/_index.md index de92ac71134..0baa8f9da86 100644 --- a/content/rancher/v2.x/en/cli/_index.md +++ b/content/rancher/v2.x/en/cli/_index.md @@ -1,8 +1,9 @@ --- -title: CLI +title: Using the Rancher Command Line Interface +description: The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI +metaTitle: "Using the Rancher Command Line Interface " +metaDescription: "The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI" weight: 6000 -aliases: - - /rancher/v2.x/en/concepts/cli-configuration/ --- The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. diff --git a/content/rancher/v2.x/en/cluster-admin/_index.md b/content/rancher/v2.x/en/cluster-admin/_index.md index f86a2bc5e84..09397d9c2c7 100644 --- a/content/rancher/v2.x/en/cluster-admin/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/_index.md @@ -3,66 +3,15 @@ title: Cluster Administration weight: 2005 --- -## What's a Kubernetes Cluster? - -A cluster is a group of computers that work together as a single system. - -A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. - -### Kubernetes Cluster Node Components - -Each computing resource in a Kubernetes Cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. - -#### etcd Nodes - -[etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) nodes run the etcd database. The etcd database component is a key value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. - -etcd is a distributed key value store, meaning it runs on multiple nodes so that there's always a backup available for fail over. Even though you can run etcd on a single node, you should run it on multiple nodes. We recommend 3, 5, or 7 etcd nodes for redundancy. - -#### Control Plane Nodes - -[Control plane](https://kubernetes.io/docs/concepts/#kubernetes-control-plane) nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although two or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. - -#### Worker Nodes - -[Worker nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) run: - -- _Kubelets_: An agent that monitors the state of the node, ensuring your containers are healthy. 
-- _Workloads_: The containers and pods that hold your apps, as well as other types of deployments. - -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your workloads. - After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. -## Interacting with Clusters +This page covers the following topics: -- **Rancher UI** +- [Switching between clusters](#switching-between-clusters) +- [Managing clusters in Rancher](#managing-clusters-in-rancher) +- [Configuring tools](#configuring-tools) - Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. - -- **kubectl** - - You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: - - - **Rancher kubectl shell** - - Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. - - For more information, see [Accessing Clusters with kubectl Shell]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). - - - **Terminal remote connection** - - You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. - - For more information, see [Accessing Clusters with kubectl and a kubeconfig File]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file). - -- **Rancher CLI** - - You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. - -- **Rancher API** - - Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. 
## Switching between Clusters @@ -76,9 +25,8 @@ After clusters have been [provisioned into Rancher]({{< baseurl >}}/rancher/v2.x | Action | [Rancher launched Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters) | | --- | --- | ---| ---| -| [Using kubeconfig file to access a Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/kubeconfig/) | * | * | * | -| [Using kubectl to Access a Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/kubectl/) | * | * | * | -| [Adding Cluster Members]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cluster-members/) | * | * | * | +| [Using kubectl and a kubeconfig file to Access a Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) | * | * | * | +| [Adding Cluster Members]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/) | * | * | * | | [Editing Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/editing-clusters/) | * | * | * | | [Managing Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/nodes) | * | * | * | | [Managing Persistent Volumes and Storage Classes]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) | * | * | * | diff --git a/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md b/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md index a29dda28a3b..e4aa716ccbb 100644 --- a/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/backing-up-etcd/_index.md @@ -7,7 +7,7 @@ _Available as of v2.2.0_ In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. -Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additonally, one-time snapshots can easily be taken as well. +Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. >**Note:** If you have any Rancher launched Kubernetes clusters that were created prior to v2.2.0, after upgrading Rancher, you must [edit the cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots prior to v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/restoring-etcd/). @@ -51,6 +51,14 @@ Rancher supports two different backup targets: By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. 
The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. +#### Safe Timestamps + +_Available as of v2.3.0_ + +As of v2.2.6, snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. As of Rancher v2.3.0, the option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. + +>>**Note:** This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. + ### S3 Backup Target The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. @@ -62,6 +70,13 @@ The `S3` backup target allows users to configure a S3 compatible backend to stor |S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | |S3 Access Key|S3 access key with permission to access the backup bucket|*| |S3 Secret Key|S3 secret key with permission to access the backup bucket|*| +| Custom CA Certificate | A custom certificate used to access private S3 backends _Available as of v2.2.5_ || + +#### Using a custom CA certificate for S3 + +_Available as of v2.2.5_ + +The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 back end uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. # IAM Support for Storing Snapshots in S3 The `S3` backup target supports using IAM authentication to AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. To use IAM authentication, the following requirements must be met: diff --git a/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md b/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md index 70617ae7199..2323917c395 100644 --- a/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/certificate-rotation/_index.md @@ -55,4 +55,33 @@ Rancher launched Kubernetes clusters have the ability to rotate the auto-generat 5. Click on **Send Request**. -**Results:** All kubernetes certificates will be rotated. +**Results:** All Kubernetes certificates will be rotated. + +### Rotating Expired Certificates After Upgrading Older Rancher Versions + +If you are upgrading from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your clusters have expired certificates, some manual steps are required to complete the certificate rotation. + +1. 
For the `controlplane` and `etcd` nodes, log in to each corresponding host and check if the certificate `kube-apiserver-requestheader-ca.pem` is in the following directory: + + ``` + cd /etc/kubernetes/.tmp + ``` + + If the certificate is not in the directory, perform the following commands: + + ``` + cp kube-ca.pem kube-apiserver-requestheader-ca.pem + cp kube-ca-key.pem kube-apiserver-requestheader-ca-key.pem + cp kube-apiserver.pem kube-apiserver-proxy-client.pem + cp kube-apiserver-key.pem kube-apiserver-proxy-client-key.pem + ``` + + If the `.tmp` directory does not exist, you can copy the entire SSL certificate to `.tmp`: + + ``` + cp -r /etc/kubernetes/ssl /etc/kubernetes/.tmp + ``` + +1. Rotate the certificates. For Rancher v2.0.x and v2.1.x, use the [Rancher API.](#certificate-rotation-in-rancher-v2-1-x-and-v2-0-x) For Rancher 2.2.x, [use the UI.](#certificate-rotation-in-rancher-v2-2-x) + +1. After the command is finished, check if the `worker` nodes are Active. If not, log in to each `worker` node and restart the kubelet and proxy. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md index ba21aa77071..c5929f81b0c 100644 --- a/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/_index.md @@ -1,13 +1,14 @@ --- -title: Cleaning up Clusters +title: Removing Kubernetes Components from Nodes +description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually weight: 2055 -aliases: - - /rancher/v2.x/en/faq/cleaning-cluster-nodes/ - - /rancher/v2.x/en/admin-settings/removing-rancher/user-cluster-nodes/ --- + +This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. + When you use Rancher to [launch nodes for a cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher), resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. -When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. +When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. ## What Gets Removed? @@ -42,29 +43,31 @@ $ sudo reboot $ sudo shutdown -r now ``` -## Cleaning a Node Manually +## Removing Rancher Components from a Cluster Manually -When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually clean the node. 
+When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. >**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. -### Imported Cluster Nodes +### Removing Rancher Components from Imported Clusters -For imported clusters, the process for removing Rancher from its nodes is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. +For imported clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. + +After the imported cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. {{% tabs %}} {{% tab "By UI / API" %}} ->**Warning:** This process will remove data from your nodes. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. +>**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. After you initiate the removal of an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#import-existing-cluster) using the Rancher UI (or API), the following events occur. -1. Rancher creates a `serviceAccount` that it uses to remove the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the cluster. +1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. -1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher and Kubernetes components off of the node. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. +1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. -1. Rancher is removed from the cluster nodes. However, the cluster persists, running the native version of Kubernetes. +1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. 
- **Result:** All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. +**Result:** All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. {{% /tab %}} {{% tab "By Script" %}} diff --git a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md index 43c8dc93a5e..b097e8b7b12 100644 --- a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md @@ -7,22 +7,18 @@ aliases: If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. -## Caveats +You can clone clusters only if the nodes in the cluster are hosted by an infrastructure provider, such as EC2, Azure, or DigitalOcean. -- Only [cluster types]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options) that interact with cloud hosts over API can be cloned. Duplication of imported clusters and custom clusters provisioned using Docker machine is not supported. +Duplication of imported clusters, clusters in hosted Kubernetes providers, and custom clusters provisioned using Docker machine is not supported. - | Cluster Type | Cloneable? | - | -------------------------------- | ------------- | - | [Hosted Kubernetes Providers][1] | ✓ | - | [Nodes Hosted by Infrastructure Provider][2] | ✓ | - | [Custom Cluster][3] | | - | [Imported Cluster][4] | | -- During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. +| Cluster Type | Cloneable? | +|----------------------------------|---------------| +| [Nodes Hosted by Infrastructure Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | +| [Hosted Kubernetes Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) | | +| [Custom Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) | | +| [Imported Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) | | -[1]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ -[2]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ -[3]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/ -[4]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ +> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. ## Prerequisites @@ -57,82 +53,32 @@ Begin by using Rancher CLI to export the configuration for the cluster that you Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. 
+> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + 1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. -1. As depicted in one of the examples below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. -{{% accordion id="gke" label="GKE" %}} -```yml -Version: v3 -clusters: - : # ENTER UNIQUE NAME - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false - googleKubernetesEngineConfig: - credential: |- - { - "type": "service_account", - "project_id": "gke-cluster-221300", - "private_key_id": "1d210afae352bc298bde1b3e680ec0c8b22cdd61" -``` -{{% /accordion %}} -{{% accordion id="eks" label="EKS" %}} -```yml -Version: v3 -clusters: - : # ENTER UNIQUE NAME - amazonElasticContainerServiceConfig: - accessKey: 00000000000000000000 - associateWorkerNodePublicIp: true - instanceType: t2.medium - maximumNodes: 3 - minimumNodes: 1 - region: us-west-2 - secretKey: 0000000000000000000000000000000000000000 - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false -``` -{{% /accordion %}} -{{% accordion id="aks" label="AKS" %}} -```yml -Version: v3 -clusters: - : # ENTER UNIQUE NAME - azureKubernetesServiceConfig: - adminUsername: azureuser - agentPoolName: rancher - agentVmSize: Standard_D5_v2 - clientId: 00000000-0000-0000-0000-000000000000 - clientSecret: 00000000000000000000000000000000000000000000 - count: 3 - kubernetesVersion: 1.11.2 - location: westus - osDiskSizeGb: 100 - resourceGroup: docker-machine - sshPublicKeyContents: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJc2kDExgRaDLD -``` -{{% /accordion %}} -{{% accordion id="ec2" label="Nodes Hosted by Infrastructure Provider (EC2, Azure, or DigitalOcean )" %}} -```yml -Version: v3 -clusters: - : # ENTER UNIQUE NAME - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false - rancherKubernetesEngineConfig: - addonJobTimeout: 30 - authentication: - strategy: x509 - authorization: {} - bastionHost: {} - cloudProvider: {} - ignoreDockerVersion: true -``` -{{% /accordion %}} +1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. -1. **Nodes Hosted by Infrastructure Provider Only:** For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. + ```yml + Version: v3 + clusters: + : # ENTER UNIQUE NAME + dockerRootDir: /var/lib/docker + enableNetworkPolicy: false + rancherKubernetesEngineConfig: + addonJobTimeout: 30 + authentication: + strategy: x509 + authorization: {} + bastionHost: {} + cloudProvider: {} + ignoreDockerVersion: true + ``` + +1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. 
If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. ```yml nodePools: diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md new file mode 100644 index 00000000000..973ba43dcce --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/cluster-access/_index.md @@ -0,0 +1,34 @@ +--- +title: Cluster Access +weight: 1 +--- + +There are many ways you can interact with Kubernetes clusters that are managed by Rancher: + +- **Rancher UI** + + Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. + +- **kubectl** + + You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: + + - **Rancher kubectl shell** + + Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. + + For more information, see [Accessing Clusters with kubectl Shell]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-shell). + + - **Terminal remote connection** + + You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. + + For more information, see [Accessing Clusters with kubectl and a kubeconfig File]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubectl/#accessing-clusters-with-kubectl-and-a-kubeconfig-file). + +- **Rancher CLI** + + You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. + +- **Rancher API** + + Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{< baseurl >}}/rancher/v2.x/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/ace/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/ace/_index.md new file mode 100644 index 00000000000..a961b633a34 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/cluster-access/ace/_index.md @@ -0,0 +1,43 @@ +--- +title: How the Authorized Cluster Endpoint Works +weight: 2015 +--- + +This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) + +### About the kubeconfig File + +The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). 
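+
+For example, kubectl can be pointed at a specific kubeconfig file instead of the default `~/.kube/config`. This is only an illustrative sketch; the path below is arbitrary:
+
+```
+# Read cluster state using an explicitly specified kubeconfig file
+kubectl --kubeconfig /custom/path/kube.config get nodes
+```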
+ +This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. + +After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. + +### Two Authentication Methods for RKE Clusters + +If the cluster is not an [RKE cluster,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. + +For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: + +- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. +- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. + +This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. + +To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](../kubectl/#authenticating-directly-with-a-downstream-cluster) + +These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page]({{}}/rancher/v2.x/en/overview/architecture/#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. + +### About the kube-api-auth Authentication Webhook + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) which is only available for [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. 
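+
+As a quick illustration of this wiring (a sketch only, assuming a standard RKE setup where the Kubernetes components run as Docker containers), you can inspect it from a controlplane node:
+
+```
+# Confirm that kube-apiserver was started with the webhook flag
+docker inspect kube-apiserver | grep authentication-token-webhook
+
+# View the generated webhook configuration file
+cat /etc/kubernetes/kube-api-authn-webhook.yaml
+```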
+ +The scheduling rules for `kube-api-auth` are listed below: + +_Applies to v2.3.0 and higher_ + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`<br/>
`node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md new file mode 100644 index 00000000000..154fea58a24 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/_index.md @@ -0,0 +1,57 @@ +--- +title: Adding Users to Clusters +weight: 2020 +aliases: + - /rancher/v2.x/en/tasks/clusters/adding-managing-cluster-members/ + - /rancher/v2.x/en/k8s-in-rancher/cluster-members/ + - /rancher/v2.x/en/cluster-admin/cluster-members +--- + +If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. + +>**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. + +There are two contexts where you can add cluster members: + +- Adding Members to a New Cluster + + You can add members to a cluster as you create it (recommended if possible). + +- [Adding Members to an Existing Cluster](#editing-cluster-membership) + + You can always add members to a cluster after a cluster is provisioned. + +## Editing Cluster Membership + +Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. + +1. From the **Global** view, open the cluster that you want to add members to. + +2. From the main menu, select **Members**. Then click **Add Member**. + +3. Search for the user or group that you want to add to the cluster. + + If external authentication is configured: + + - Rancher returns users from your [external authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) source as you type. + + >**Using AD but can't find your users?** + >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ad/). + + - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. + + >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). + +4. Assign the user or group **Cluster** roles. + + [What are Cluster Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) + + >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. + > + > - To add roles to the list, [Add a Custom Role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). + > - To remove roles from the list, [Lock/Unlock Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles). + +**Result:** The chosen users are added to the cluster. + +- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. 
diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/_index.md new file mode 100644 index 00000000000..f8d6817e65f --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/_index.md @@ -0,0 +1,109 @@ +--- +title: "Access a Cluster with Kubectl and kubeconfig" +description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." +weight: 2010 +aliases: + - /rancher/v2.x/en/k8s-in-rancher/kubectl/ + - /rancher/v2.x/en/cluster-admin/kubectl + - /rancher/v2.x/en/concepts/clusters/kubeconfig-files/ + - /rancher/v2.x/en/k8s-in-rancher/kubeconfig/ + - /rancher/2.x/en/cluster-admin/kubeconfig +--- + +This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. + +For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). + +- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) +- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) +- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) +- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) + - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) + - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) + + +### Accessing Clusters with kubectl Shell in the Rancher UI + +You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. + +1. From the **Global** view, open the cluster that you want to access with kubectl. + +2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. + +### Accessing Clusters with kubectl from Your Workstation + +This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. + +This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. + +> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. +1. Click **Kubeconfig File**. +1. Copy the contents displayed to your clipboard. +1. Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: + ``` + kubectl --kubeconfig /custom/path/kube.config get pods + ``` +1. From your workstation, launch kubectl. 
Use it to interact with your Kubernetes cluster. + + +### Note on Resources Created Using kubectl + +Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. + +# Authenticating Directly with a Downstream Cluster + +This section is intended to help you set up an alternative method to access an [RKE cluster.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) + +This method is only available for RKE clusters that have the [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](../ace) + +As a best practice, we recommend setting up this method to access your RKE cluster, so that you can still access the cluster even if you can’t connect to Rancher. + +> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) + +To find the name of the context(s) in your downloaded kubeconfig file, run: + +``` +kubectl config get-contexts --kubeconfig /custom/path/kube.config +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* my-cluster my-cluster user-46tmn + my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn +``` + +In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. + +With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with a downstream RKE cluster directly. + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) + +Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. + +When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. + +### Connecting Directly to Clusters with FQDN Defined + +If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context.
+ +Assuming the kubeconfig file is located at `~/.kube/config`: + +``` +kubectl --context -fqdn get nodes +``` +Directly referencing the location of the kubeconfig file: +``` +kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods +``` + +### Connecting Directly to Clusters without FQDN Defined + +If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. + +Assuming the kubeconfig file is located at `~/.kube/config`: +``` +kubectl --context - get nodes +``` +Directly referencing the location of the kubeconfig file: +``` +kubectl --kubeconfig /custom/path/kube.config --context - get pods +``` diff --git a/content/rancher/v2.x/en/cluster-admin/cluster-members/_index.md b/content/rancher/v2.x/en/cluster-admin/cluster-members/_index.md deleted file mode 100644 index 8f8a197ad77..00000000000 --- a/content/rancher/v2.x/en/cluster-admin/cluster-members/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Adding Users to Clusters -weight: 2020 -aliases: - - /rancher/v2.x/en/tasks/clusters/adding-managing-cluster-members/ - - /rancher/v2.x/en/cluster-provisioning/cluster-members/ - - /rancher/v2.x/en/k8s-in-rancher/cluster-members/ ---- - -If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. - ->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. - -There are two contexts where you can add cluster members: - -- Adding Members to a New Cluster - - You can add members to a cluster as you create it (recommended if possible). - -- [Adding Members to an Existing Cluster]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/) - - You can always add members to a cluster after a cluster is provisioned. diff --git a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md index c047fb9dbdc..5c2cf122f0c 100644 --- a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md @@ -1,109 +1,38 @@ --- -title: Editing Clusters +title: Cluster Configuration weight: 2025 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/editing-clusters/ --- After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **Ellipsis (...) > Edit** for the cluster that you want to edit. To Edit an Existing Cluster -![Edit Cluster]({{< baseurl >}}/img/rancher/edit-cluster.png) +![Edit Cluster]({{}}/img/rancher/edit-cluster.png) The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. 
-The following table lists the options and settings available for each cluster type: +The following table summarizes the options and settings available for each cluster type: - Cluster Type | Member Roles | Cluster Options | Node Pools ----------|----------|---------|---------| - [RKE-Launched]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#rancher-launched-kubernetes) | ✓ | ✓ | ✓ | - [Hosted Kubernetes Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#hosted-kubernetes-cluster) | ✓ | | | - [Imported]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#import-existing-cluster) | ✓ | | | + Rancher Capability | RKE Launched | Hosted Kubernetes Cluster | Imported Cluster + ---------|----------|---------|---------| + Manage member roles | ✓ | ✓ | ✓ + Edit cluster options | ✓ | | + Manage node pools | ✓ | | ## Editing Cluster Membership -Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. - -1. From the **Global** view, open the cluster that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the cluster. - - If external authentication is configured: - - - Rancher returns users from your [external authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) source as you type. - - >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/ad/). - - - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -4. Assign the user or group **Cluster** roles. - - [What are Cluster Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - - >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - > - To remove roles from the list, [Lock/Unlock Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles). - -**Result:** The chosen users are added to the cluster. - -- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. +Cluster administrators can [edit the membership for a cluster,]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members) controlling which Rancher users can access the cluster and what features they can use. ## Cluster Options When editing clusters, clusters that are [launched using RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) feature more options than clusters that are imported or hosted by a Kubernetes provider. The headings that follow document options available only for RKE clusters. 
-### Upgrading Kubernetes +### Updating ingress-nginx -Following an upgrade to the latest version of Rancher, you can update your existing clusters to use the latest supported version of Kubernetes. Before a new version of Rancher is released, it's tested with the latest versions of Kubernetes to ensure compatibility. +Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. ->**Recommended:** Before upgrading Kubernetes, [backup your cluster]({{< baseurl >}}/rancher/v2.x/en/backups). +If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. -1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **Vertical Ellipsis (...) > Edit**. - -1. Expand **Cluster Options**. - -1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. - -1. Click **Save**. - -**Result:** Kubernetes begins upgrading for the cluster. During the upgrade, your cluster is unavailable. - -> **Note:** The `ingress-nginx` pods are set to only upgrade on delete. After upgrading your cluster, you need to delete these pods to get the correct version for your deployment. - -### Adding a Pod Security Policy - -When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. - -You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. - -1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **Vertical Ellipsis (...) > Edit**. - -2. Expand **Cluster Options**. - -3. From **Pod Security Policy Support**, select **Enabled**. - - >**Note:** This option is only available for clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. - - Rancher ships with [policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. - -5. Click **Save**. - -**Result:** The pod security policy is applied to the cluster and any projects within the cluster. - ->**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. -> ->To check if a running workload passes your pod security policy, clone or upgrade it. - -### Editing Other Cluster Options +# Editing Other Cluster Options In [clusters launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. 
@@ -115,42 +44,29 @@ In [clusters launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioni Option | Description | ---------|----------| - Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes](#upgrading-kubernetes). | + Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes). | Network Provider | The [container networking interface]({{< baseurl >}}/rancher/v2.x/en/faq/networking/#cni-providers) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | Pod Security Policy Support | Enables [pod security policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install-external-lb/#software), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | + Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{< baseurl >}}/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. |
-#### Editing Cluster as YAML + +# Editing Cluster as YAML >**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{< baseurl >}}/rke/latest/en/config-options/) in an RKE installation. +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. - To read from an existing RKE file, click **Read from File**. +In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the [cluster configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options) + ![image]({{< baseurl >}}/img/rancher/cluster-options-yaml.png) For an example of RKE config file syntax, see the [RKE documentation]({{< baseurl >}}/rke/latest/en/example-yamls/). - -## Managing Node Pools - -In clusters [launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can: - -- Add new [pools of nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to your cluster. The nodes added to the pool are provisioned according to the [node template]({{< baseurl >}}/rancher/v2.x/en/user-settings/node-templates/) that you use. - - - Click **+** and follow the directions on screen to create a new template. - - - You can also reuse existing templates by selecting one from the **Template** drop-down. - -- Redistribute Kubernetes roles amongst your node pools by making different checkbox selections - -- Scale the number of nodes in a pool up or down (although, if you simply want to maintain your node scale, we recommend using the cluster's [Nodes tab]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/nodes/#nodes-provisioned-by-node-pool) instead.) - ->**Note:** The Node Pools section is not available for imported clusters or clusters hosted by a Kubernetes provider. diff --git a/content/rancher/v2.x/en/cluster-admin/kubeconfig/_index.md b/content/rancher/v2.x/en/cluster-admin/kubeconfig/_index.md deleted file mode 100644 index 32843651a65..00000000000 --- a/content/rancher/v2.x/en/cluster-admin/kubeconfig/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Kubeconfig File -weight: 2010 -aliases: - - /rancher/v2.x/en/concepts/clusters/kubeconfig-files/ - - /rancher/v2.x/en/k8s-in-rancher/kubeconfig/ ---- - -A _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl commandline tool (or other clients). - -For more details on how kubeconfig and kubectl work together, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). 
- -When you create a cluster using the Rancher GUI, Rancher automatically creates a kubeconfig for your cluster. - -This kubeconfig file and its contents are specific to the cluster you are viewing. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. - -For more information, see [Using kubectl to Access a Cluster]({{< baseurl >}}/rancher/v2.x/en//k8s-in-rancher/kubectl). - ->**Note:** By default, kubectl checks `~/.kube/config` for a kubeconfig file, but you can use any directory you want using the `--kubeconfig` flag. For example: - -``` -kubectl --kubeconfig /custom/path/kube.config get pods -``` - -## Accessing Rancher Launched Kubernetes clusters without Rancher server running - -By default, Rancher generates a kubeconfig file that will proxy through the Rancher server to connect to the Kubernetes API server on a cluster. - -For [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) clusters, which have [Authorized Cluster Endpoint]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) enabled, Rancher generates extra context(s) in the kubeconfig file in order to connect directly to the cluster. - -> **Note:** By default, all Rancher Launched Kubernetes clusters have [Authorized Cluster Endpoint]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) enabled. - -To find the name of the context(s), run: - -``` -kubectl config get-contexts --kubeconfig /custom/path/kube.config -CURRENT NAME CLUSTER AUTHINFO NAMESPACE -* my-cluster my-cluster user-46tmn - my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn -``` - -### Clusters with FQDN defined as an Authorized Cluster Endpoint - -If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -``` -# Assuming the kubeconfig file is located at ~/.kube/config -kubectl --context -fqdn get nodes - -# Directly referencing the location of the kubeconfig file -kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods -``` - -### Clusters without FQDN defined as an Authorized Cluster Endpoint - -If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. 
- -``` -# Assuming the kubeconfig file is located at ~/.kube/config -kubectl --context - get nodes - -# Directly referencing the location of the kubeconfig file -kubectl --kubeconfig /custom/path/kube.config --context - get pods -``` diff --git a/content/rancher/v2.x/en/cluster-admin/kubectl/_index.md b/content/rancher/v2.x/en/cluster-admin/kubectl/_index.md deleted file mode 100644 index 2d332ac495a..00000000000 --- a/content/rancher/v2.x/en/cluster-admin/kubectl/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Using kubectl to Access a Cluster -weight: 2015 -aliases: - - /rancher/v2.x/en/tasks/clusters/using-kubectl-to-access-a-cluster/ - - /rancher/v2.x/en/k8s-in-rancher/kubectl/ ---- -You can access and manage your Kubernetes clusters using kubectl in two ways: - -- [Accessing Clusters with kubectl Shell](#accessing-clusters-with-kubectl-shell) -- [Accessing Clusters with kubectl CLI and a kubeconfig File]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) - -## Resources created using kubectl - -Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. - -## Accessing Clusters with kubectl Shell - -You can access and manage your clusters by logging into Rancher and opening the kubectl shell. No further configuration necessary. - -1. From the **Global** view, open the cluster that you want to access with kubectl. - -2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. - - For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). - - -## Accessing Clusters with kubectl and a kubeconfig File - -Alternatively, you can access your clusters by installing kubectl on your workstation, and then directing it toward a kubeconfig file automatically generated by Rancher. After install and configuration, you can access your clusters without logging into Rancher. - -1. Install kubectl on your workstation. For more information, see [Kubernetes Documentation: Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -2. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. - -3. Copy the cluster's kubeconfig file to your workstation. - - 1. Click **Kubeconfig File**. - - 2. Copy the contents displayed to your clipboard. - - 3. Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. - - >**Note:** The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in the sample that follows: - > - >``` - kubectl --kubeconfig /custom/path/kube.config get pods - ``` -4. From your workstation, launch kubectl. Use it to interact with your kubernetes cluster. - - If you have launched a [Rancher Launched Kubernetes cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) and want to use kubectl without using Rancher, see [Kubeconfig Files]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/). 
- - For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). diff --git a/content/rancher/v2.x/en/cluster-admin/nodes/_index.md b/content/rancher/v2.x/en/cluster-admin/nodes/_index.md index 56019ecbecb..31c3e595bae 100644 --- a/content/rancher/v2.x/en/cluster-admin/nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/nodes/_index.md @@ -1,5 +1,5 @@ --- -title: Nodes +title: Nodes and Node Pools weight: 2030 aliases: - /rancher/v2.x/en/k8s-in-rancher/nodes/ @@ -7,18 +7,30 @@ aliases: After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) to provision the cluster, there are different node options available. +This page covers the following topics: + +- [Node options for each type of cluster](#node-options-for-each-type-of-cluster) +- [Cordoning and draining nodes](#cordoning-and-draining-nodes) +- [Editing a node](#editing-a-node) +- [Viewing a node API](#viewing-a-node-api) +- [Deleting a node](#deleting-a-node) +- [Scaling nodes](#scaling-nodes) +- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) +- [Managing node pools](#managing-node-pools) To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **Ellipsis** icon (**...**). >**Note:** If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters). +# Node Options for Each Type of Cluster + The following table lists which node options are available for each [type of cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options) in Rancher. Click the links in the **Option** column for more detailed information about each feature. | Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Imported Nodes][4] | Description | | ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | ------------------------------------------------------------------ | | [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable. | | [Drain](#draining-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable _and_ evicts all pods. | -| [Edit](#editing-a-node) | ✓ | ✓ | ✓ | | Enter a custom name, description, or label for a node. | +| [Edit](#editing-a-node) | ✓ | ✓ | ✓ | | Enter a custom name, description, label, or taints for a node. | | [View API](#viewing-a-node-api) | ✓ | ✓ | ✓ | | View API data. | | [Delete](#deleting-a-node) | ✓ | ✓ | | | Deletes defective nodes from the cluster. | | [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key for in order to SSH into the node. 
| @@ -29,14 +41,26 @@ The following table lists which node options are available for each [type of clu [3]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ [4]: {{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/ -## Cordoning a Node +### Notes for Node Pool Nodes + +Clusters provisioned using [one of the node pool options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) automatically maintain the node scale that's set during the initial cluster provisioning. This scale determines the number of active nodes that Rancher maintains for the cluster. + +### Notes for Nodes Provisioned by Hosted Kubernetes Providers + +Options for managing nodes [hosted by a Kubernetes provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. + +### Notes for Imported Nodes + +Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. + +# Cordoning and Draining Nodes _Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. -## Draining a Node - _Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. +When nodes are drained, pods are handled with the following rules: + - For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. - For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. @@ -51,14 +75,14 @@ The node draining options are different based on your version of Rancher. There are two drain modes: aggressive and safe. -- **Aggressive Mode** - +- **Aggressive Mode** + In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. - + Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. -- **Safe Mode** - +- **Safe Mode** + If a node has standalone pods or ephemeral data it will be cordoned but not drained. ### Aggressive and Safe Draining Options for Rancher Prior to v2.2.x @@ -82,7 +106,7 @@ The following list describes each drain option: The timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. 
For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used. -### Timeout +### Timeout The amount of time drain should continue to wait before giving up. @@ -99,28 +123,33 @@ Once drain successfully completes, the node will be in a state of `drained`. You >**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). -## Editing a Node +# Editing a Node -Editing a node lets you change its name, add a description of the node, or add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). +Editing a node lets you: + +* Change its name +* Change its description +* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) -## Viewing a Node API +# Viewing a Node API Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.x/en/api/). -## Deleting a Node +# Deleting a Node Use **Delete** to remove defective nodes from the cloud provider. When you the delete a defective node, Rancher automatically replaces it with an identically provisioned node. >**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. -## Scaling Nodes +# Scaling Nodes For nodes hosted by an infrastructure provider, you can scale the number of nodes in each node pool by using the scale controls. This option isn't available for other cluster types. -## SSH into a Node Hosted by an Infrastructure Provider +# SSH into a Node Hosted by an Infrastructure Provider For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. @@ -140,17 +169,19 @@ For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en ``` ssh -i id_rsa root@ ``` + +# Managing Node Pools -## Notes for Node Pool Nodes +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) The node pool features are not available for imported clusters or clusters hosted by a Kubernetes provider. -Clusters provisioned using [one of the node pool options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) automatically maintain the node scale that's set during the initial cluster provisioning. This scale determines the number of active nodes that Rancher maintains for the cluster. +In clusters [launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can: +- Add new [pools of nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) to your cluster. The nodes added to the pool are provisioned according to the [node template]({{< baseurl >}}/rancher/v2.x/en/user-settings/node-templates/) that you use. -## Notes for Nodes Provisioned by Hosted Kubernetes Providers + - Click **+** and follow the directions on screen to create a new template. 
-Options for managing nodes [hosted by a Kubernetes provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. + - You can also reuse existing templates by selecting one from the **Template** drop-down. +- Redistribute Kubernetes roles amongst your node pools by making different checkbox selections -## Notes for Imported Nodes - -Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. +- Scale the number of nodes in a pool up or down (although, if you simply want to maintain your node scale, we recommend using the cluster's [Nodes tab]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/nodes/#nodes-provisioned-by-node-pool) instead.) diff --git a/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md b/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md new file mode 100644 index 00000000000..11e415f5b3a --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/pod-security-policy/_index.md @@ -0,0 +1,30 @@ +--- +title: Adding a Pod Security Policy +weight: 80 +--- + +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) + +When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. + +You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. + +1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **Vertical Ellipsis (...) > Edit**. + +2. Expand **Cluster Options**. + +3. From **Pod Security Policy Support**, select **Enabled**. + + >**Note:** This option is only available for clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). + +4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. + + Rancher ships with [policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. + +5. Click **Save**. + +**Result:** The pod security policy is applied to the cluster and any projects within the cluster. + +>**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. +> +>To check if a running workload passes your pod security policy, clone or upgrade it. 
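As a rough command-line check of this behavior, you can list the pod security policies in the cluster and see which policy admitted a given pod; the PSP admission controller records the policy name in the pod's `kubernetes.io/psp` annotation. The namespace and pod names below are placeholders.

```
# List the pod security policies available in the cluster.
kubectl get psp

# Show which policy admitted a running pod (placeholder names).
kubectl -n <namespace> get pod <pod-name> \
  -o jsonpath='{.metadata.annotations.kubernetes\.io/psp}'
```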
\ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md b/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md index 592ea784b6b..91514708290 100644 --- a/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/projects-and-namespaces/_index.md @@ -1,45 +1,93 @@ --- -title: Projects and Namespaces +title: Projects and Kubernetes Namespaces with Rancher +description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces weight: 2032 aliases: - /rancher/v2.x/en/concepts/projects/ - /rancher/v2.x/en/tasks/projects/ - /rancher/v2.x/en/tasks/projects/create-project/ - - /rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/ + - /rancher/v2.x/en/tasks/projects/create-project/ --- -## Projects +A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. -_Projects_ are organizational objects introduced in Rancher that ease the administrative burden of your cluster. You can use projects to support multi-tenancy. +A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. -Projects provide an extra level of organization in your Kubernetes clusters beyond [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). In terms of hierarchy: +This section describes how projects and namespaces work with Rancher. It covers the following topics: -- Clusters contain projects. -- Projects contain namespaces. +- [About namespaces](#about-namespaces) +- [About projects](#about-projects) + - [The cluster's default project](#the-cluster-s-default-project) + - [The system project](#the-system-project) +- [Project authorization](#project-authorization) +- [Pod security policies](#pod-security-policies) +- [Creating projects](#creating-projects) +- [Switching between clusters and projects](#switching-between-clusters-and-projects) -Within Rancher, projects allow you manage multiple namespaces as a single entity. In the base version of Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! +# About Namespaces -Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each namespace. +A namespace is a concept introduced by Kubernetes. 
According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) + +> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. + +Namespaces provide the following functionality: + +- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. +- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. + +You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. + +You can assign the following resources directly to namespaces: + +- [Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/) +- [Load Balancers/Ingress]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) +- [Service Discovery Records]({{}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) +- [Persistent Volume Claims]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) +- [Certificates]({{}}/rancher/v2.x/en/k8s-in-rancher/certificates/) +- [ConfigMaps]({{}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) +- [Registries]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) +- [Secrets]({{}}/rancher/v2.x/en/k8s-in-rancher/secrets/) + +>**Note:** Although you can assign role-based access to namespaces in the base version of Kubernetes, you cannot assign roles to namespaces in Rancher. Instead, assign role-based access at the project level. + +For more information on creating and moving namespaces, see [Namespaces]({{}}/rancher/v2.x/en/project-admin/namespaces/). + +# About Projects + +Within Rancher, a project can contain multiple namespaces and access control policies, making it possible to organize and isolate resources within the project. + +A project is a concept introduced by Rancher that allows you manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.x/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.x/en/k8s-in-rancher/) + +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. You can use projects to perform actions like: -- Assign users access to a group of namespaces (i.e., [project membership]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members)). -- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). 
+- Assign users access to a group of namespaces (i.e., [project membership]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members)). +- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - Assign resources to the project. - Assign Pod Security Policies. - -When you create a cluster, two project are automatically created within it: +When you create a cluster, two projects are automatically created within it: - [Default Project](#default-project) - [System Project](#system-project) +### The Cluster's Default Project -### Default Project +When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. -When you provision a cluster, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. +If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. -### System Project +If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. + +### The System Project _Available as of v2.0.7_ @@ -56,23 +104,32 @@ The `system` project: >**Note:** In clusters where both: > -> - The [Canal network plug-in]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#canal) is in use. +> - The [Canal network plug-in]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#canal) is in use. > - The Project Network Isolation option is enabled. > >The `system` project overrides the Project Network Isolation option so that it can communicate with other projects, collect logs, and check health. -### Authorization +# Project Authorization -Non-administrative users are only authorized for project access after an administrator, cluster owner or cluster member explicitly adds them to the project's **Members** tab. +Standard users are only authorized for project access in two situations: ->**Exception:** -> Non-administrative users can access projects that they create themselves. +- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. +- Standard users can access projects that they create themselves. -### Pod Security Policies +# Pod Security Policies -Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the project level in addition to the cluster level. However, as a best practice, we recommend applying Pod Security Policies at the cluster level. +Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. 
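If you want to check this grouping from the command line, each namespace managed by Rancher typically carries a `field.cattle.io/projectId` annotation identifying the project it belongs to; treat the annotation name as an assumption to verify against your Rancher version.

```
# Show which Rancher project a namespace belongs to.
# The field.cattle.io/projectId annotation is an assumption; verify it
# against your Rancher version. The namespace name is a placeholder.
kubectl get namespace <namespace-name> \
  -o jsonpath='{.metadata.annotations.field\.cattle\.io/projectId}'
```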
-### Creating Projects +# Creating Projects + +This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. + +1. [Name a new project.](#1-name-a-new-project) +2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) +3. [Recommended: Add project members.](#3-recommended-add-project-members) +4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) + +### 1. Name a New Project 1. From the **Global** view, choose **Clusters** from the main menu. From the **Clusters** page, open the cluster from which you want to create a project. @@ -80,91 +137,61 @@ Rancher extends Kubernetes to allow the application of [Pod Security Policies](h 1. Enter a **Project Name**. -1. **Optional:** Select a **Pod Security Policy**. Assigning a PSP to a project will: +### 2. Optional: Select a Pod Security Policy - - Override the cluster's default PSP. - - Apply the PSP to the project. - - Apply the PSP to any namespaces you add to the project later. +This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/). - >**Note:** This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/). +Assigning a PSP to a project will: -1. **Recommended:** Add project members. +- Override the cluster's default PSP. +- Apply the PSP to the project. +- Apply the PSP to any namespaces you add to the project later. - Use the **Members** section to provide other users with project access and roles. +### 3. Recommended: Add Project Members - By default, your user is added as the project `Owner`. +Use the **Members** section to provide other users with project access and roles. - 1. Click **Add Member**. +By default, your user is added as the project `Owner`. - 1. From the **Name** combo box, search for a user or group that you want to assign project access. +>**Notes on Permissions:** +> +>- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. +>- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). - >**Note:** You can only search for groups if external authentication is enabled. +To add members: - 1. From the **Role** drop-down, choose a role. +1. Click **Add Member**. +1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. +1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - [What are Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) +### 4. 
Optional: Add Resource Quotas - >**Notes:** - > - >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - > - >- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). +_Available as of v2.1.0_ - 1. To add more members, repeat substeps a—c. - -1. **Optional:** Add **Resource Quotas**, which limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas). - - >**Note:** This option is available as of v2.1.0. - - 1. Click **Add Quota**. - - 1. Select a [Resource Type]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). - - 1. Enter values for the **Project Limit** and the **Namespace Default Limit**. - - | Field | Description | - | ----------------------- | -------------------------------------------------------------------------------------------------------- | - | Project Limit | The overall resource limit for the project. | - | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | - - 1. **Optional:** Repeat these substeps to add more quotas. - -1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#setting-container-default-resource-limit) - >**Note:** This option is available as of v2.2.0. +Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas). +To add a resource quota, +1. Click **Add Quota**. +1. Select a [Resource Type]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. +1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit]({{}}/rancher/v2.x/en/project-admin/resource-quotas/#setting-container-default-resource-limit) Note: This option is available as of v2.2.0. 1. Click **Create**. **Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. 
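For context on the quota fields described below, the **Namespace Default Limit** that Rancher propagates to each namespace behaves much like a standard Kubernetes `ResourceQuota`. The sketch below shows roughly what an equivalent namespace quota would look like if created by hand; the names and values are illustrative only, not what Rancher generates.

```
# Illustrative only: a namespace quota similar in effect to a
# "Namespace Default Limit" of 500m CPU / 512Mi memory.
kubectl apply -f - <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
  name: default-quota        # hypothetical name
  namespace: my-namespace    # hypothetical namespace
spec:
  hard:
    limits.cpu: 500m
    limits.memory: 512Mi
EOF
```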
-## Switching between Clusters/Projects +| Field | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------- | +| Project Limit | The overall resource limit for the project. | +| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | + +# Switching between Clusters and Projects To switch between clusters and projects, use the **Global** drop-down available in the main menu. -![Global Menu]({{< baseurl >}}/img/rancher/global-menu.png) +![Global Menu]({{}}/img/rancher/global-menu.png) Alternatively, you can switch between projects and clusters using the main menu. - To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. -- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. - -## Namespaces - -Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. - -Although you assign resources at the project level so that each namespace can in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. - -Resources that you can assign directly to namespaces include: - -- [Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/configmaps/) -- [Registries]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/registries/) -- [Secrets]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/secrets/) - ->**Note:** Although you can assign role-based access to namespaces in the base version of Kubernetes, you cannot assign roles to namespaces in Rancher. Instead, assign role-based access at the project level. - -For more information, see [Namespaces]({{< baseurl >}}/rancher/v2.x/en/project-admin/namespaces/). +- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md b/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md index e79f9f20478..0c70a823b0d 100644 --- a/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/restoring-etcd/_index.md @@ -33,7 +33,7 @@ If your Kubernetes cluster is broken, you can restore the cluster from a snapsho **Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. 
The cluster is restored when it returns to an `active` state. -> **Note:** If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters that were provisioned using [nodes hosted in an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. +> **Note:** If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters that were provisioned using [nodes hosted in an infrastructure provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. ## Recovering etcd without a Snapshot diff --git a/content/rancher/v2.x/en/cluster-admin/tools/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/_index.md index 7cec8deaebd..8a1a01be91f 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/_index.md @@ -1,8 +1,7 @@ --- -title: Configuring Tools +title: Tools for Logging, Monitoring, and Visibility weight: 2033 aliases: - - /rancher/v2.x/en/tools/ - /rancher/v2.x/en/tools/notifiers-and-alerts/ --- @@ -10,84 +9,41 @@ Rancher contains a variety of tools that aren't included in Kubernetes to assist -- [Notifiers](#notifiers) -- [Alerts](#alerts) +- [Notifiers and Alerts](#notifiers-and-alerts) - [Logging](#logging) - [Monitoring](#monitoring) +- [Istio](#istio) ## Notifiers and Alerts -Notifiers and alerts are two features that work together to inform you of events in the Rancher system. Notifiers are objects that you configure to leverage popular IT services, which send you notification of Rancher events. Alerts are rule sets that trigger when those notifications are sent. +Notifiers and alerts are two features that work together to inform you of events in the Rancher system. -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -### Notifiers - -Before you can receive alerts, you must configure one or more notifier in Rancher. - -_Notifiers_ are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Rancher integrates with a variety of popular IT services, including: - -- Slack: Send alert notifications to your Slack channels. -- Email: Choose email recipients for alert notifications. -- PagerDuty: Route notifications to staff by phone, SMS, or personal email. -- Webhooks: Update a webpage with alert notifications. -- WeChat: Send alert notifications to your Enterprise WeChat contacts. - -For more information, see [Notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/). 
- -### Alerts - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need stay informed of events occurring in your clusters, both planned and unplanned. To help you stay informed of these events, Rancher allows you to configure alerts. - -_Alerts_ are sets of rules, chosen by you, to monitor for specific events. The scope for alerts can be set at either the cluster or project level. - -Some examples of alert events are: - -- A Kubernetes [master component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. -- A node or [workload]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. -- A scheduled deployment taking place as planned. -- A node's hardware resources becoming overstressed. - -When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. - -Additionally, you can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. - -You can configure alerts at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/alerts/). +[Notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. +[Alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts) are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. ## Logging -Rancher can integrate with popular external services used for event streams, telemetry, or search. Rancher can integrate with the following services: +Logging is helpful because it allows you to: -- Elasticsearch -- splunk -- kafka -- syslog -- fluentd +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debugg and troubleshoot problems -These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. +Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. -When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. Additionally, you'll have the opportunity to enter key value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key value pairs. 
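To see the raw material these integrations work with, you can look at the per-container log files on a node. The directory comes from the paragraph above; the file-name pattern shown is the typical kubelet layout and may differ by container runtime.

```
# On a cluster node: list the per-container log files that the
# logging integrations collect.
ls /var/log/containers/

# Follow one container's log; the <pod>_<namespace>_<container>-<id>.log
# pattern is typical but runtime-dependent.
tail -f /var/log/containers/<pod>_<namespace>_<container>-<id>.log
```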
- -### Logging Advantages - -Setting up a logging service to collect logs from your cluster or project is helpful several ways: - -- Logs errors and warnings in your Kubernetes infrastructure to a stream. The stream informs you of events like a container crashing, a pod eviction, or a node dying. -- Allows you to capture and analyze the state of your cluster and look for trends in your environment using the log stream. -- Helps you when troubleshooting or debugging. -- Saves your logs to a safe location outside of your cluster, so that you can still access them even if your cluster encounters issues. - -You can configure these services to collect logs at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/logging/). +For details, refer to the [logging section.]({{}}/rancher/v2.x/en/cluster-admin/tools/logging) ## Monitoring _Available as of v2.2.0_ -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. Prometheus provides a _time series_ of your data, which is a stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring) -In other words, Prometheus let's you view metrics from your different Rancher and Kubernetes objects. Using timestamps, you can query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. Multi-tenancy support in terms of cluster and project-only Prometheus instances are also supported. +## Istio -You can configure these services to collect logs at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/). + [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. 
For details on how to enable Istio in Rancher, refer to the [Istio section.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md index 6f2805a315f..d8d19368108 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/alerts/_index.md @@ -3,11 +3,38 @@ title: Alerts weight: 2 --- -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. To help you stay informed of these events, you can configure alerts. +To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. -Alerts are sets of rules, chosen by you, to monitor for specific events. +Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. -## Alerts Scope +Before you can receive alerts, you must configure one or more notifier in Rancher. + +When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. + +For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) + +This section covers the following topics: + +- [Alert event examples](#alert-event-examples) +- [Urgency levels](#urgency-levels) +- [Scope of alerts](#scope-of-alerts) +- [Adding cluster alerts](#adding-cluster-alerts) +- [Managing cluster alerts](#managing-cluster-alerts) + +# Alert Event Examples + +Some examples of alert events are: + +- A Kubernetes [master component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. +- A node or [workload]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. +- A scheduled deployment taking place as planned. +- A node's hardware resources becoming overstressed. + +# Urgency Levels + +You can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. + +# Scope of Alerts The scope for alerts can be set at either the cluster level or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/alerts/). @@ -18,7 +45,7 @@ At the cluster level, Rancher monitors components in your Kubernetes cluster, an - The resource events from specific system services. 
- The Prometheus expression cross the thresholds -## Adding Cluster Alerts +# Adding Cluster Alerts As a [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. @@ -150,7 +177,7 @@ This alert type monitors for the overload from Prometheus expression querying, i - [**Node**](https://github.com/prometheus/node_exporter) - [**Container**](https://github.com/google/cadvisor) - - [**ETCD**](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/monitoring.md) + - [**ETCD**](https://etcd.io/docs/v3.4.0/op-guide/monitoring/) - [**Kubernetes Components**](https://github.com/kubernetes/metrics) - [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) - [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{< baseurl >}}/rancher/v2.x/en/tools/logging)) @@ -198,7 +225,7 @@ This alert type monitors for the overload from Prometheus expression querying, i **Result:** Your alert is configured. A notification is sent when the alert is triggered. -## Managing Cluster Alerts +# Managing Cluster Alerts After you set up cluster alerts, you can manage each alert object. To manage alerts, browse to the cluster containing the alerts, and then select **Tools > Alerts** that you want to manage. You can: diff --git a/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md new file mode 100644 index 00000000000..13277b3fbc4 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md @@ -0,0 +1,57 @@ +--- +title: Default Alerts for Cluster Monitoring +weight: 1 +--- + +When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them. + +Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{< baseurl >}} +/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). + +# Alerts for etcd +Etcd is the key-value store that contains the state of the Kubernetes cluster. Rancher provides default alerts if the built-in monitoring detects a potential problem with etcd. You don't have to enable monitoring to receive these alerts. + +A leader is the node that handles all client requests that need cluster consensus. For more information, you can refer to this [explanation of how etcd works.](https://rancher.com/blog/2019/2019-01-29-what-is-etcd/#how-does-etcd-work) + +The leader of the cluster can change in response to certain events. It is normal for the leader to change, but too many changes can indicate a problem with the network or a high CPU load. With longer latencies, the default etcd configuration may cause frequent heartbeat timeouts, which trigger a new leader election. + +| Alert | Explanation | +|-------|-------------| +| A high number of leader changes within the etcd cluster are happening | A warning alert is triggered when the leader changes more than three times in one hour. 
| +| Database usage close to the quota 500M | A warning alert is triggered when the size of etcd exceeds 500M.| +| Etcd is unavailable | A critical alert is triggered when etcd becomes unavailable. | +| Etcd member has no leader | A critical alert is triggered when the etcd cluster does not have a leader for at least three minutes. | + + +# Alerts for Kubernetes Components +Rancher provides alerts when core Kubernetes system components become unhealthy. + +Controllers update Kubernetes resources based on changes in etcd. The [controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. + +The [scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) service is a core component of Kubernetes. It is responsible for scheduling cluster workloads to nodes, based on various configurations, metrics, resource requirements and workload-specific requirements. + +| Alert | Explanation | +|-------|-------------| +| Controller Manager is unavailable | A critical warning is triggered when the cluster’s controller-manager becomes unavailable. | +| Scheduler is unavailable | A critical warning is triggered when the cluster’s scheduler becomes unavailable. | + + +# Alerts for Events +Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. In the Rancher UI, from the project view, you can see events for each workload. + +| Alert | Explanation | +|-------|-------------| +| Get warning deployment event | A warning alert is triggered when a warning event happens on a deployment. | + + +# Alerts for Nodes +Alerts can be triggered based on node metrics. Each computing resource in a Kubernetes cluster is called a node. [Nodes]({{}}/rancher/v2.x/en/cluster-admin/#kubernetes-cluster-node-components) can be either bare-metal servers or virtual machines. + +| Alert | Explanation | +|-------|-------------| +| High CPU load | A warning alert is triggered if the node uses more than 100 percent of the node’s available CPU seconds for at least three minutes. | +| High node memory utilization | A warning alert is triggered if the node uses more than 80 percent of its available memory for at least three minutes. | +| Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | + +# Project-level Alerts +When you enable monitoring for the project, some project-level alerts are provided. 
For details, refer to the [section on project-level alerts.]({{}}/rancher/v2.x/en/project-admin/tools/alerts/#default-project-level-alerts) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/_index.md index ccd346bdccb..71313bc816f 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/_index.md @@ -1,75 +1,87 @@ --- title: Istio weight: 5 +aliases: + - /rancher/v2.x/en/project-admin/istio/configuring-resource-allocations/_index.md + - /rancher/v2.x/en/cluster-admin/tools/istio/_index.md + - /rancher/v2.x/en/project-admin/istio/index.md --- +_Available as of v2.3.0_ -_Available as of v2.3.0-alpha5_ + [Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. -Using Rancher, you can connect, secure, control, and observe services through integration with [Istio](https://istio.io/), a leading open-source service mesh solution. Istio provides behavioral insights and operational control over the service mesh as a whole, offering a complete solution to satisfy the diverse requirements of microservice applications. + As a network of microservices changes and grows, the interactions between them can become more difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. -## Prerequisites +Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. -The required resource allocation for each service is listed in the [configuration options]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/istio/config/). Please review it before attempting to enable Istio. +This service mesh provides features that include but are not limited to the following: -In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding node selector for each Istio components. +- Traffic management features +- Enhanced monitoring and tracing +- Service discovery and routing +- Secure connections and service-to-service authentication with mutual TLS +- Load balancing +- Automatic retries, backoff, and circuit breaking -#### Default Resource Consumption +After Istio is enabled in a cluster, you can leverage Istio's control plane functionality with `kubectl`. 
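As an example of that `kubectl`-driven control, the traffic-management use cases mentioned above (canary, blue/green, or A/B deployments) are expressed as Istio resources. The sketch below splits traffic 90/10 between two versions of a service; the host, subset, and weight values are placeholders, and the matching subsets would be defined in a separate `DestinationRule`.

```
# Illustrative canary split between two versions of a service.
# All names (reviews, v1, v2) are placeholders; the subsets are assumed
# to be defined in a corresponding DestinationRule.
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
    - reviews
  http:
    - route:
        - destination:
            host: reviews
            subset: v1
          weight: 90
        - destination:
            host: reviews
            subset: v2
          weight: 10
EOF
```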
-Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ----------|-----------|---------------|---------------|-------------|-------------|------------- -istio-pilot |discovery| 500m | 2048Mi | 1000m | 4096Mi | Y - istio-telemetry |mixer| 1000m | 1024Mi | 4800m | 4096Mi | Y - istio-policy | mixer | 1000m | 1024Mi | 4800m | 4096Mi | Y - istio-tracing | jaeger | 100m | 100Mi | 500m | 1024Mi | Y - prometheus | prometheus | 750m | 750Mi | 1000m | 1024Mi | Y - grafana | grafana | 100m | 100Mi | 200m | 512Mi | Y - Others | - | 500m | 500Mi | - | - | N - Total | - | 3950m | 5546Mi | - | - | - +Rancher's Istio integration comes with comprehensive visualization aids: -## Enabling Istio +- **Trace the root cause of errors with Jaeger.** [Jaeger](https://www.jaegertracing.io/) is an open-source tool that provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. Distributed tracing allows you to view an entire chain of calls, which might originate with a user request and traverse dozens of microservices. +- **Get the full picture of your microservice architecture with Kiali.** [Kiali](https://www.kiali.io/) provides a diagram that shows the services within a service mesh and how they are connected, including the traffic rates and latencies between them. You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. +- **Gain insights from time series analytics with Grafana dashboards.** [Grafana](https://grafana.com/) is an analytics platform that allows you to query, visualize, alert on and understand the data gathered by Prometheus. +- **Write custom queries for time series data with the Prometheus UI.** [Prometheus](https://prometheus.io/) is a systems monitoring and alerting toolkit. Prometheus scrapes data from your cluster, which is then used by Grafana. A Prometheus UI is also integrated into Rancher, and lets you write custom queries for time series data and see the results in the UI. -As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Istio to your Kubernetes cluster. +# Prerequisites -1. From the **Global** view, navigate to the cluster that you want to configure Istio for. +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources) to run all of the components of Istio. -1. Select **Tools > Istio** in the navigation bar. +# Setup Guide -1. Select **Enable** to show the [Istio configuration options]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/istio/config/). Enter in your desired configuration options. Ensure you have enough resources on your worker nodes to enable Istio. +Refer to the [setup guide]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) for instructions on how to set up Istio and use it in a project. -1. Click **Save**. +# Disabling Istio -**Result:** The Istio application, `cluster-istio`, is added as an [application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. After the application is `active`, you can start using Istio. 
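Once the application is `active`, a quick way to sanity-check the deployment from the command line is to list the control-plane workloads. The `istio-system` namespace is the conventional location for Istio's components; verify that it matches where the `cluster-istio` app was deployed in your cluster.

```
# Confirm the Istio control-plane workloads are running.
# istio-system is the conventional namespace; adjust if your
# cluster-istio app was deployed elsewhere.
kubectl -n istio-system get pods
kubectl -n istio-system get services
```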
+To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio) +# Accessing Visualizations -## Using Istio for Metrics Visualization +> By default, only cluster owners have access to Jaeger and Kiali. For instructions on how to allow project members to access them, refer to [Access to Visualizations.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/rbac/#access-to-visualizations) -Once Istio is `active`, you can see visualizations of your Istio service mesh with Kiali, Jaeger, Grafana, and Prometheus, which are all open-source projects that Rancher has integrated with. +After Istio is set up in a cluster, Grafana, Prometheus, Jaeger, and Kiali are available in the Rancher UI. -- **Kiali** helps you define, validate, and observe your Istio service mesh. Kiali shows you what services are in your mesh and how they are connected. Kiali includes Jaeger Tracing to provide distributed tracing out of the box. -- **Jaeger** is a distributed tracing system released as open source by Uber Technologies. It is used for monitoring and troubleshooting microservices-based distributed systems. -- **Grafana** is an analytics platform that allows you to query, visualize, alert on and understand your metrics. Grafana lets you visualize data from Prometheus. -- **Prometheus** is a systems monitoring and alerting toolkit. +Your access to the visualizations depend on your role. Grafana and Prometheus are only available for cluster owners. The Kiali and Jaeger UIs are available only to cluster owners by default, but cluster owners can allow project members to access them by editing the Istio settings. When you go to your project and click **Resources > Istio,** you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. -With Istio enabled, you can: +To see the visualizations, go to the cluster where Istio is set up and click **Tools > Istio.** You should see links to each UI at the top of the page. -- Access [Kiali UI](https://www.kiali.io/) by clicking the Kiali UI icon in the Istio page. -- Access [Jaeger UI](https://www.jaegertracing.io/) by clicking the Jaeger UI icon in the Istio page. -- Access [Grafana UI](https://grafana.com/) by clicking the Grafana UI icon in the Istio page. -- Access [Prometheus UI](https://prometheus.io/) by clicking the Prometheus UI icon in the Istio page. -- Go to a project to [view traffic graph, traffic metrics and manage traffic]({{< baseurl >}}/rancher/v2.x/en/project-admin/istio/). +You can also get to the visualization tools from the project view. -## Leveraging Istio in Projects +# Viewing the Kiali Traffic Graph -After you enable Istio, you can see traphic metrics and a traffic graph on the project level. You can see a traffic graph for all namespaces that have Istio sidecar injection enabled. For more information, refer to [How to Use Istio in Your Project]({{< baseurl >}}/rancher/v2.x/en/project-admin/istio/). +1. From the project view in Rancher, click **Resources > Istio.** +1. If you are a cluster owner, you can go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. -## Disabling Istio +# Viewing Traffic Metrics -To disable Istio: +Istio’s monitoring features provide visibility into the performance of all your services. -1. From the **Global** view, navigate to the cluster that you want to disable Istio for. +1. 
From the project view in Rancher, click **Resources > Istio.** +1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** Cluster owners can see all of the metrics, while project members can see a subset of the metrics. -1. Select **Tools > Istio** in the navigation bar. +# Architecture -1. Click **Disable Istio**, then click the red button again to confirm the disable action. +Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. -**Result:** The `cluster-istio` application in the cluster's `system` project gets removed. +Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. + +Enabling Istio in Rancher enables monitoring in the cluster, and enables Istio in all new namespaces that are created in a cluster. You need to manually enable Istio in preexisting namespaces. + +When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. + +For more information on the Istio sidecar, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). + +### Two Ingresses + +By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. To allow Istio to receive external traffic, you need to enable the Istio ingress gateway for the cluster. The result is that your cluster will have two ingresses. + +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio/_index.md new file mode 100644 index 00000000000..3cba3d5a86c --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/disabling-istio/_index.md @@ -0,0 +1,27 @@ +--- +title: Disabling Istio +weight: 4 +--- + +This section describes how to disable Istio in a cluster, namespace, or workload. + +# Disable Istio in a Cluster + +To disable Istio, + +1. From the **Global** view, navigate to the cluster that you want to disable Istio for. +1. Click **Tools > Istio.** +1. Click **Disable,** then click the red button again to confirm the disable action. + +**Result:** The `cluster-istio` application in the cluster's `system` project gets removed. The Istio sidecar cannot be deployed on any workloads in the cluster. + +# Disable Istio in a Namespace + +1. In the Rancher UI, go to the project that has the namespace where you want to disable Istio. +1. On the **Workloads** tab, you will see a list of namespaces and the workloads deployed in them. Go to the namespace where you want to disable and click the **Ellipsis (...) > Disable Istio Auto Injection.** + +**Result:** When workloads are deployed in this namespace, they will not have the Istio sidecar. + +# Remove the Istio Sidecar from a Workload + +Disable Istio in the namespace, then redeploy the workloads with in it. They will be deployed without the Istio sidecar. 
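After redeploying, you can confirm whether a workload still carries the sidecar by listing its pod's containers; an injected pod includes a container named `istio-proxy`, the standard name for the Envoy sidecar. The namespace and pod names below are placeholders.

```
# List the containers in a pod; an injected workload includes "istio-proxy".
kubectl -n <namespace> get pod <pod-name> \
  -o jsonpath='{.spec.containers[*].name}'
```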
\ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/rbac/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/rbac/_index.md new file mode 100644 index 00000000000..eb6f3c20fa7 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/rbac/_index.md @@ -0,0 +1,58 @@ +--- +title: Role-based Access Control +weight: 3 +--- + +This section describes the permissions required to access Istio features and how to configure access to the Kiali and Jaeger visualizations. + +# Cluster-level Access + +By default, only cluster administrators can: + +- Enable Istio for the cluster +- Configure resource allocations for Istio +- View each UI for Prometheus, Grafana, Kiali, and Jaeger + +# Project-level Access + +After Istio is enabled in a cluster, project owners and members have permission to: + +- Enable and disable Istio sidecar auto-injection for namespaces +- Add the Istio sidecar to workloads +- View the traffic metrics and traffic graph for the cluster +- View the Kiali and Jaeger visualizations if cluster administrators give access to project members +- Configure Istio's resources (such as the gateway, destination rules, or virtual services) with `kubectl` (This does not apply to read-only project members) + +# Access to Visualizations + +By default, the Kiali and Jaeger visualizations are restricted to the cluster owner because the information in them could be sensitive. + +**Jaeger** provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. + +**Kiali** provides a diagram that shows the services within a service mesh and how they are connected. + +Rancher supports giving groups permission to access Kiali and Jaeger, but not individuals. + +To configure who has permission to access the Kiali and Jaeger UI, + +1. Go to the cluster view and click **Tools > Istio.** +1. Then go to the **Member Access** section. If you want to restrict access to certain groups, choose **Allow cluster owner and specified members to access Kiali and Jaeger UI.** Search for the groups that you want to have access to Kiali and Jaeger. If you want all members to have access to the tools, click **Allow all members to access Kiali and Jaeger UI.** +1. Click **Save.** + +**Result:** The access levels for Kiali and Jaeger have been updated. + +# Summary of Default Permissions for Istio Users + +| Permission | Cluster Administrators | Project Owners | Project Members | Read-only Project Members | +|------------------------------------------|----------------|----------------|-----------------|---------------------------| +| Enable and disable Istio for the cluster | ✓ | | | | +| Configure Istio resource limits | ✓ | | | | +| Control who has access to Kiali and the Jaeger UI | ✓ | | | | +| Enable and disable Istio for a namespace | ✓ | ✓ | ✓ | | +| Enable and disable Istio on workloads | ✓ | ✓ | ✓ | | +| Configure Istio with `kubectl` | ✓ | ✓ | ✓ | | +| View Prometheus UI and Grafana UI | ✓ | | | | +| View Kiali UI and Jaeger UI ([Configurable](#access-to-visualizations)) | ✓ | | | | +| View Istio project dashboard, including traffic metrics* | ✓ | ✓ | ✓ | ✓ | + +* By default, only the cluster owner will see the traffic graph. Project members will see only a subset of traffic metrics. Project members cannot see the traffic graph because it comes from Kiali, and access to Kiali is restricted to cluster owners by default. 
\ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/config/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/resources/_index.md similarity index 55% rename from content/rancher/v2.x/en/cluster-admin/tools/istio/config/_index.md rename to content/rancher/v2.x/en/cluster-admin/tools/istio/resources/_index.md index a4080f6cc23..9b0dea50923 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/config/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/resources/_index.md @@ -1,15 +1,61 @@ --- -title: Istio Configuration +title: CPU and Memory Allocations weight: 1 +aliases: + - /rancher/v2.x/en/project-admin/istio/configuring-resource-allocations/_index.md + - /rancher/v2.x/en/project-admin/istio/config/_index.md --- +_Available as of v2.3.0_ -_Available as of v2.3.0-alpha5_ +This section describes the minimum recommended computing resources for the Istio components in a cluster. -There are several configuration options for Istio. You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/docs/concepts/what-is-istio). +The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) -## PILOT +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. -Pilot provides service discovery for the Envoy sidecars, traffic management capabilities for intelligent routing (e.g., A/B tests, canary rollouts, etc.), and resiliency (timeouts, retries, circuit breakers, etc.). +> **Tip:** In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. + +The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each central Istio component. + +In Kubernetes, the resource request indicates that the workload will not be deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) + +Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable +---------|-----------|---------------|---------------|-------------|-------------|------------- +istio-pilot |discovery| 500m | 2048Mi | 1000m | 4096Mi | Y + istio-telemetry |mixer| 1000m | 1024Mi | 4800m | 4096Mi | Y + istio-policy | mixer | 1000m | 1024Mi | 4800m | 4096Mi | Y + istio-tracing | jaeger | 100m | 100Mi | 500m | 1024Mi | Y + prometheus | prometheus | 750m | 750Mi | 1000m | 1024Mi | Y + grafana | grafana | 100m | 100Mi | 200m | 512Mi | Y + Others | - | 500m | 500Mi | - | - | N + **Total** | **-** | **3950m** | **5546Mi** | **>12300m** | **>14848Mi** | **-** + + +# Configuring Resource Allocations + +You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. + +To make it easier to schedule the workloads to a node, a cluster administrator can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. 
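For readers less familiar with how these allocations are expressed in Kubernetes, the sketch below shows a hypothetical pod (the name and image are placeholders, not anything Rancher deploys) whose container uses the `istio-pilot` row from the table above as its requests and limits:

```yaml
# Hypothetical pod illustrating Kubernetes resource requests and limits, using
# the istio-pilot values from the table above. The name and image are
# placeholders for illustration only.
apiVersion: v1
kind: Pod
metadata:
  name: resource-demo
spec:
  containers:
  - name: discovery
    image: nginx            # placeholder image
    resources:
      requests:
        cpu: 500m           # the scheduler only places the pod on a node with at
        memory: 2048Mi      # least this much CPU and memory available
      limits:
        cpu: 1000m          # exceeding the CPU limit throttles the container;
        memory: 4096Mi      # exceeding the memory limit can get it terminated
```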
+ +You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/docs/concepts/what-is-istio). + +To configure the resources allocated to an Istio component, + +1. In Rancher, go to the cluster where you have Istio installed. +1. Click **Tools > Istio.** This opens the Istio configuration page. +1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. +1. Click **Save.** + +**Result:** The resource allocations for the Istio components are updated. + +## Pilot + +[Pilot](https://istio.io/docs/ops/deployment/architecture/#pilot) provides the following: + +- Authentication configuration +- Service discovery for the Envoy sidecars +- Traffic management capabilities for intelligent routing (A/B tests and canary rollouts) +- Configuration for resiliency (timeouts, retries, circuit breakers, etc) For more information on Pilot, refer to the [documentation](https://istio.io/docs/concepts/traffic-management/#pilot-and-envoy). @@ -22,9 +68,11 @@ Pilot Memory Reservation | Memory resource requests for the istio-pilot pod. | Y Trace sampling Percentage | [Trace sampling percentage](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/#trace-sampling) | Yes | 1 Pilot Selector | Ability to select the nodes in which istio-pilot pod is deployed to. To use this option, the nodes must have labels. | No | n/a -## MIXER +## Mixer -Mixer is a platform-independent component. Mixer enforces access control and usage policies across the service mesh, and collects telemetry data from the Envoy proxy and other services. For more information on Mixer, policies and telemetry, refer to the [documentation](https://istio.io/docs/concepts/policies-and-telemetry/). +[Mixer](https://istio.io/docs/ops/deployment/architecture/#mixer) enforces access control and usage policies across the service mesh. It also integrates with plugins for monitoring tools such as Prometheus. The Envoy sidecar proxy passes telemetry data and monitoring data to Mixer, and Mixer passes the monitoring data to Prometheus. + +For more information on Mixer, policies and telemetry, refer to the [documentation](https://istio.io/docs/concepts/policies-and-telemetry/). Option | Description| Required | Default -------|------------|-------|------- @@ -39,9 +87,9 @@ Mixer Policy Memory Limit | Memory resource limit for the istio-policy pod. | Ye Mixer Policy Memory Reservation | Memory resource requests for the istio-policy pod. | Yes, when policy enabled | 1024 Mixer Selector | Ability to select the nodes in which istio-policy and istio-telemetry pods are deployed to. To use this option, the nodes must have labels. | No | n/a -## TRACING +## Tracing -Istio-enabled applications can collect trace spans. For more information on distributed tracing with Istio, refer to the [documentation](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/). +[Distributed tracing](https://istio.io/docs/tasks/telemetry/distributed-tracing/overview/) enables users to track a request through a service mesh. This makes it easier to troubleshoot problems with latency, parallelism and serialization. Option | Description| Required | Default -------|------------|-------|------- @@ -52,9 +100,11 @@ Tracing Memory Limit | Memory resource limit for the istio-tracing pod. | Yes Tracing Memory Reservation | Memory resource requests for the istio-tracing pod. | Yes | 100 Tracing Selector | Ability to select the nodes in which tracing pod is deployed to. 
To use this option, the nodes must have labels. | No | n/a -## INGRESS GATEWAY +## Ingress Gateway -The Istio Gateway allows Istio features such as monitoring and route rules to be applied to traffic entering the cluster. For more information, refer to the [documentation](https://istio.io/docs/tasks/traffic-management/ingress/). +The Istio gateway allows Istio features such as monitoring and route rules to be applied to traffic entering the cluster. This gateway is a prerequisite for outside traffic to make requests to Istio. + +For more information, refer to the [documentation](https://istio.io/docs/tasks/traffic-management/ingress/). Option | Description| Required | Default -------|------------|-------|------- @@ -70,7 +120,7 @@ Ingress Gateway Memory Limit | Memory resource limit for the istio-ingressgatewa Ingress Gateway Memory Reservation | Memory resource requests for the istio-ingressgateway pod. | Yes | 128 Ingress Gateway Selector | Ability to select the nodes in which istio-ingressgateway pod is deployed to. To use this option, the nodes must have labels. | No | n/a -## PROMETHEUS +## Prometheus You can query for Istio metrics using Prometheus. Prometheus is an open-source systems monitoring and alerting toolkit. @@ -83,9 +133,9 @@ Prometheus Memory Reservation | Memory resource requests for the Prometheus pod. Retention for Prometheus | How long your Prometheus instance retains data | Yes | 6 Prometheus Selector | Ability to select the nodes in which Prometheus pod is deployed to. To use this option, the nodes must have labels.| No | n/a -## GRAFANA +## Grafana -You can visualize metrics with Grafana. Grafana is a tool that lets you visualize Istio traffic data. +You can visualize metrics with Grafana. Grafana lets you visualize Istio traffic data scraped by Prometheus. Option | Description| Required | Default -------|------------|-------|------- @@ -99,6 +149,4 @@ Enable Persistent Storage for Grafana | Enable Persistent Storage for Grafana | Source | Use a Storage Class to provision a new persistent volume or Use an existing persistent volume claim | Yes, when Grafana enabled and enabled PV | Use SC Storage Class | Storage Class for provisioning PV for Grafana | Yes, when Grafana enabled, enabled PV and use storage class | Use the default class Persistent Volume Size | The size for the PV you would like to provision for Grafana | Yes, when Grafana enabled, enabled PV and use storage class | 5Gi -Existing Claim | Use existing PVC for Grafna | Yes, when Grafana enabled, enabled PV and use existing PVC | n/a - - +Existing Claim | Use existing PVC for Grafana | Yes, when Grafana enabled, enabled PV and use existing PVC | n/a diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md new file mode 100644 index 00000000000..da1fbcacc7a --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md @@ -0,0 +1,28 @@ +--- +title: Setup Guide +weight: 2 +--- + +This section describes how to enable Istio and start using it in your projects. + +This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. + +If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. 
+ +> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) + +1. [Enable Istio in the cluster.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster) +1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) +1. [Select the nodes where the main Istio components will be deployed.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) +1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads) +1. [Set up the Istio gateway. ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) +1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) +1. [Generate traffic and see Istio in action.](#generate-traffic-and-see-istio-in-action) + +# Prerequisites + +This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.x/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning) on which you will install Istio. + +The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) + +The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md new file mode 100644 index 00000000000..38bb20f588a --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md @@ -0,0 +1,322 @@ +--- +title: 4. Add Deployments and Services with the Istio Sidecar +weight: 4 +--- + +> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled. + +Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. + +To inject the Istio sidecar on an existing workload in the namespace, go to the workload, click the **Ellipsis (...),** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. + +Wait a few minutes for the workload to upgrade and get the Istio sidecar. Click the workload and go to the **Containers** section. You should see the `istio-init` and `istio-proxy` containers alongside your original container. This means the Istio sidecar is enabled for the workload: Istio handles all of the wiring for the Envoy sidecar, and its features can now be applied to the workload by enabling them in YAML. + +### 3. Add Deployments and Services + +Next, we add the Kubernetes resources for the sample deployments and services of the BookInfo app from Istio's documentation. + +1. Go to the project in the cluster where you want to deploy the workloads. +1. In Workloads, click **Import YAML.** +1. Copy the resources below into the form. +1. 
Click **Import.** + +This will set up the following sample resources from Istio's example BookInfo app: + +Details service and deployment: + +- A `details` Service +- A ServiceAccount for `bookinfo-details` +- A `details-v1` Deployment + +Ratings service and deployment: + +- A `ratings` Service +- A ServiceAccount for `bookinfo-ratings` +- A `ratings-v1` Deployment + +Reviews service and deployments (three versions): + +- A `reviews` Service +- A ServiceAccount for `bookinfo-reviews` +- A `reviews-v1` Deployment +- A `reviews-v2` Deployment +- A `reviews-v3` Deployment + +Productpage service and deployment: + +This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. + +- A `productpage` service +- A ServiceAccount for `bookinfo-productpage` +- A `productpage-v1` Deployment + +### Resource YAML + +```yaml +# Copyright 2017 Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Reviews service 
+################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + metadata: + labels: + app: reviews + version: v1 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +``` + +### [Next: Set up the Istio Gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md new file mode 100644 index 00000000000..9df03283a12 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md @@ -0,0 +1,22 @@ +--- +title: 1. Enable Istio in the Cluster +weight: 1 +--- + +This cluster uses the default Nginx controller to allow traffic into the cluster. 
+ +A Rancher [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. + +1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. +1. Click **Tools > Istio.** +1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. +1. Click **Enable**. +1. Click **Save**. + +**Result:** Istio is enabled at the cluster level. + +The Istio application, `cluster-istio`, is added as an [application]({{}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. + +When Istio is enabled in the cluster, the label for Istio sidecar auto injection, `istio-injection=enabled`, will be automatically added to each new namespace in this cluster. This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. + +### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md new file mode 100644 index 00000000000..948d15c7c05 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md @@ -0,0 +1,45 @@ +--- +title: 2. Enable Istio in a Namespace +weight: 2 +--- + +You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. + +This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. + +> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio enabled. + +1. In the Rancher UI, go to the cluster view. Click the **Projects/Namespaces** tab. +1. Go to the namespace where you want to enable the Istio sidecar auto injection and click the **Ellipsis (...).** +1. Click **Edit.** +1. In the **Istio sidecar auto injection** section, click **Enable.** +1. Click **Save.** + +**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. + +### Verifying that Automatic Istio Sidecar Injection is Enabled + +To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. + +### Excluding Workloads from Being Injected with the Istio Sidecar + +If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: + +``` +sidecar.istio.io/inject: "false" +``` + +To add the annotation to a workload, + +1. From the **Global** view, open the project that has the workload that should not have the sidecar. +1. Click **Resources > Workloads.** +1. Go to the workload that should not have the sidecar and click **Ellipsis (...) 
> Edit.** +1. Click **Show Advanced Options.** Then expand the **Labels & Annotations** section. +1. Click **Add Annotation.** +1. In the **Key** field, enter `sidecar.istio.io/inject`. +1. In the **Value** field, enter `false`. +1. Click **Save.** + +**Result:** The Istio sidecar will not be injected into the workload. + +### [Next: Select the Nodes Where Istio Components Will Be Deployed]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md new file mode 100644 index 00000000000..47c9ff33812 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md @@ -0,0 +1,130 @@ +--- +title: 5. Set up the Istio Gateway +weight: 5 +--- + +The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. + +You can use the NGINX ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. + +To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two ingresses. + +You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. + +You can route traffic into the service mesh with a load balancer or just Istio's NodePort gateway. This section describes how to set up the NodePort gateway. + +For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) + +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) + +# Enable the Istio Gateway + +The ingress gateway is a Kubernetes service that will be deployed in your cluster. There is only one Istio gateway per cluster. + +1. Go to the cluster where you want to allow outside traffic into Istio. +1. Click **Tools > Istio.** +1. Expand the **Ingress Gateway** section. +1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.]({{}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/) +1. Optionally, configure the ports, service types, node selectors and tolerations, and resource requests and limits for this service. The default resource requests for CPU and memory are the minimum recommended resources. +1. Click **Save.** + +**Result:** The gateway is deployed, which allows Istio to receive traffic from outside the cluster. + +# Add a Kubernetes Gateway that Points to the Istio Gateway + +To allow traffic to reach your services through the Istio ingress gateway, you will also need to provide a Kubernetes gateway resource in your YAML that points to Istio's implementation of the ingress gateway to the cluster. + +1. Go to the namespace where you want to deploy the Kubernetes gateway and click **Import YAML.** +1. Upload the gateway YAML as a file or paste it into the form. 
An example gateway YAML is provided below. +1. Click **Import.** + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage + port: + number: 9080 +``` + +**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. + +Confirm that the resource exists by running: +``` +kubectl get gateway -A +``` + +The result should be something like this: +``` +NAME AGE +bookinfo-gateway 64m +``` + +### Access the ProductPage Service from a Web Browser + +To test and see if the BookInfo app deployed correctly, the app can be viewed in a web browser using the Istio controller IP and port, combined with the request path specified in your Kubernetes gateway resource: + +`http://<Istio controller IP>:<port>/productpage` + +To get the ingress gateway URL and port, + +1. Go to the `System` project in your cluster. +1. Within the `System` project, go to `Resources` > `Workloads` then scroll down to the `istio-system` namespace. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. +1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. + +**Result:** You should see the BookInfo app in the web browser. + +For help inspecting the Istio controller URL and ports, try the commands in the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) + +# Troubleshooting + +The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. + +### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller + +You can try the steps in this section to make sure the Kubernetes gateway is configured properly. + +In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: + +1. Go to the `System` project in your cluster. +1. Within the `System` project, go to the namespace `istio-system`. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. +1. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. 
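To make the relationship concrete, the hedged sketch below is a hypothetical illustration (the pod name and image are placeholders, not the manifests Rancher deploys) of the two halves that must agree: a gateway pod carrying the `istio: ingressgateway` label, and a Gateway resource whose `spec.selector` uses the same key and value.

```yaml
# Hypothetical illustration of how a Gateway selector matches the label on the
# ingress gateway pods. In a Rancher cluster the real workload is
# istio-ingressgateway in the istio-system namespace.
apiVersion: v1
kind: Pod
metadata:
  name: example-ingressgateway-pod
  labels:
    istio: ingressgateway       # the label the Gateway selector looks for
spec:
  containers:
  - name: proxy
    image: nginx                # placeholder image
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: example-gateway
spec:
  selector:
    istio: ingressgateway       # must match the label on the gateway pods above
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
```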
+ +### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors/_index.md new file mode 100644 index 00000000000..aa7e807b095 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors/_index.md @@ -0,0 +1,38 @@ +--- +title: 3. Select the Nodes Where Istio Components Will be Deployed +weight: 3 +--- + +> **Prerequisite:** Your cluster needs a worker node that can be designated for Istio. The worker node should meet the [resource requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources) + +This section describes how to use node selectors to configure Istio components to be deployed on a designated node. + +In larger deployments, it is strongly advised that Istio's infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. + +# Adding a Label to the Istio Node + +First, add a label to the node where Istio components should be deployed. This label can have any key-value pair. For this example, we will use the key `istio` and the value `enabled`. + +1. From the cluster view, go to the **Nodes** tab. +1. Go to a worker node that will host the Istio components and click **Ellipsis (...) > Edit.** +1. Expand the **Labels & Annotations** section. +1. Click **Add Label.** +1. In the fields that appear, enter `istio` for the key and `enabled` for the value. +1. Click **Save.** + +**Result:** A worker node has the label that will allow you to designate it for Istio components. + +# Configuring Istio Components to Use the Labeled Node + +Configure each Istio component to be deployed to the node with the Istio label. Each Istio component can be configured individually, but in this tutorial, we will configure all of the components to be scheduled on the same node for the sake of simplicity. + +For larger deployments, it is recommended to schedule each component of Istio onto separate nodes. + +1. From the cluster view, click **Tools > Istio.** +1. Expand the **Pilot** section and click **Add Selector** in the form that appears. Enter the node selector label that you added to the Istio node. In our case, we are using the key `istio` and the value `enabled`. +1. Repeat the previous step for the **Mixer** and **Tracing** sections. +1. Click **Save.** + +**Result:** The Istio components will be deployed on the Istio node. + +### [Next: Add Deployments and Services]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md new file mode 100644 index 00000000000..2048e779265 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md @@ -0,0 +1,61 @@ +--- +title: 6. Set up Istio's Components for Traffic Management +weight: 6 +--- + +A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. 
+ +- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. +- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. + +This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. + +In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v3`. + +After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. + +To deploy the virtual service and destination rules for the `reviews` service, + +1. Go to the project view and click **Import YAML.** +1. Copy the resources below into the form. +1. Click **Import.** + +``` +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: reviews +spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + subset: v1 + weight: 50 + - destination: + host: reviews + subset: v3 + weight: 50 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: reviews +spec: + host: reviews + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 +``` +**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. + +### [Next: Generate and View Traffic]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic/_index.md new file mode 100644 index 00000000000..bb6c979e28d --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/view-traffic/_index.md @@ -0,0 +1,26 @@ +--- +title: 7. Generate and View Traffic +weight: 7 +--- + +This section describes how to view the traffic that is being managed by Istio. + +# The Kiali Traffic Graph + +Rancher integrates a Kiali graph into the Rancher UI. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. + +To see the traffic graph, + +1. From the project view in Rancher, click **Resources > Istio.** +1. Go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. + +If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. 
The control panel on the right side of the graph lets you configure details including how many minutes of the most recent traffic should be shown on the graph. + +For additional tools and visualizations, you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. + +# Viewing Traffic Metrics + +Istio’s monitoring features provide visibility into the performance of all your services. + +1. From the project view in Rancher, click **Resources > Istio.** +1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md index 86ca4105843..b1431bf3750 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/logging/_index.md @@ -1,12 +1,19 @@ --- -title: Logging +title: Rancher Integration with Logging Services +description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. +metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." weight: 3 aliases: - /rancher/v2.x/en/tasks/logging/ - - /rancher/v2.x/en/tools/logging/ --- -Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. +Logging is helpful because it allows you to: + +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debug and troubleshoot problems Rancher supports integration with the following services: @@ -16,9 +23,26 @@ Rancher supports integration with the following services: - Syslog - Fluentd +This section covers the following topics: + +- [How logging integrations work](#how-logging-integrations-work) +- [Requirements](#requirements) +- [Logging scope](#logging-scope) +- [Enabling cluster logging](#enabling-cluster-logging) + +# How Logging Integrations Work + +Rancher can integrate with popular external services used for event streams, telemetry, or search. These services can log errors and warnings in your Kubernetes infrastructure to a stream. + +These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. + +When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. + +Additionally, you'll have the opportunity to enter key-value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key-value pairs. + >**Note:** You can only configure one logging service per cluster or per project. 
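As a hedged illustration of the key-value filtering mentioned above, and assuming the configured pairs correspond to ordinary workload labels (the names, labels, and image below are hypothetical), a filter such as `app=payment` would limit collection to containers from workloads labeled like this:

```yaml
# Hypothetical deployment showing the kind of key-value labels a logging filter
# could match; the names, labels, and image are examples only.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: payment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: payment
  template:
    metadata:
      labels:
        app: payment        # a filter configured as app=payment would match log
    spec:                   # events from this workload's containers
      containers:
      - name: payment
        image: nginx        # placeholder image
```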
-## Requirements +# Requirements The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: @@ -27,16 +51,7 @@ $ docker info | grep 'Logging Driver' Logging Driver: json-file ``` -## Advantages - -Setting up a logging service to collect logs from your cluster/project has several advantages: - -- Logs errors and warnings in your Kubernetes infrastructure to a stream. The stream informs you of events like a container crashing, a pod eviction, or a node dying. -- Allows you to capture and analyze the state of your cluster and look for trends in your environment using the log stream. -- Helps you when troubleshooting or debugging. -- Saves your logs to a safe location outside of your cluster, so that you can still access them even if your cluster encounters issues. - -## Logging Scope +# Logging Scope You can configure logging at either cluster level or project level. @@ -48,7 +63,7 @@ Logs that are sent to your logging service are from the following locations: - Pod logs stored at `/var/log/containers`. - Kubernetes system components logs stored at `/var/lib/rancher/rke/log/`. -## Enabling Cluster Logging +# Enabling Cluster Logging As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. @@ -84,7 +99,7 @@ As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global ``` openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. + 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. 1. (Optional) Complete the **Additional Logging Configuration** form. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md index f6f0a6acae1..e5aa8f8482f 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md @@ -21,6 +21,8 @@ If you are using rsyslog, please make sure your rsyslog authentication mode is ` 1. Select a **Log Severity** for events that are logged to the Syslog server. For more information on each severity level, see the [Syslog protocol documentation](https://tools.ietf.org/html/rfc5424#page-11). + - Specifying a **Log Severity** does not act as a filtering mechanism for logs. To filter logs by severity, use a parser on the Syslog server. + ## Encryption Configuration If your Syslog server is using **TCP** protocol and uses TLS, you need to select **Use TLS** and complete the **Encryption Configuration** form. 
diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md index a1c48123f1d..ede960e2578 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/_index.md @@ -1,17 +1,37 @@ --- -title: Monitoring +title: Integrating Rancher and Prometheus for Cluster Monitoring +description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring weight: 4 --- _Available as of v2.2.0_ -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. + +This section covers the following topics: + +- [About Prometheus](#about-prometheus) +- [Monitoring scope](#monitoring-scope) +- [Enabling cluster monitoring](#enabling-cluster-monitoring) +- [Resource consumption](#resource-consumption) + - [Resource consumption of Prometheus pods](#resource-consumption-of-prometheus-pods) + - [Resource consumption of other pods](#resource-consumption-of-other-pods) + +# About Prometheus + +Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): + +You can enable monitoring at either the cluster level or the project level. This page describes how to enable monitoring for a cluster. For details on enabling monitoring for a project, refer to the [project administration section]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/). >A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. -In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. Multi-tenancy support in terms of cluster and project-only Prometheus instances are also supported. +In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. 
-## Monitoring Scope +By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. + +Multi-tenancy support in terms of cluster-only and project-only Prometheus instances are also supported. + +# Monitoring Scope Using Prometheus, you can monitor Rancher at both the cluster level and [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. @@ -23,7 +43,7 @@ Using Prometheus, you can monitor Rancher at both the cluster level and [project - [Project monitoring]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. -## Enabling Cluster Monitoring +# Enabling Cluster Monitoring As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. @@ -35,13 +55,13 @@ As an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global 1. Click **Save**. -**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/) through the [Rancher dashboard]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/#rancher-dashboard) or directly from [Grafana]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). +**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/) through the [Rancher dashboard]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/#rancher-dashboard) or directly from [Grafana]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). -### Resource Consumption +# Resource Consumption When enabling cluster monitoring, you need to ensure your worker nodes and Prometheus pod have enough resources. The tables below provides a guide of how much resource consumption will be used. In larger deployments, it is strongly advised that the monitoring infrastructure be placed on dedicated nodes in the cluster. -#### Prometheus Pod Resource Consumption +### Resource Consumption of Prometheus Pods This table is the resource consumption of the Prometheus pod, which is based on the number of all the nodes in the cluster. The count of nodes includes the worker, control plane and etcd nodes. 
Total disk space allocation should be approximated by the `rate * retention` period set at the cluster level. When enabling cluster level monitoring, you should adjust the CPU and Memory limits and reservation. @@ -69,7 +89,7 @@ Additional pod resource requirements for cluster level monitoring. | Operator | prometheus-operator | 100m | 50Mi | 200m | 100Mi | Y | -#### Other Pods Resource Consumption +### Resource Consumption of Other Pods Besides the Prometheus pod, there are components that are deployed that require additional resources on the worker nodes. @@ -78,4 +98,4 @@ Pod | CPU (milli CPU) | Memory (MB) Node Exporter (Per Node) | 100 | 30 Kube State Cluster Monitor | 100 | 130 Grafana | 100 | 150 -Prometheus Cluster Monitoring Nginx | 50 | 50 +Prometheus Cluster Monitoring Nginx | 50 | 50 \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md index 256bfa306eb..14c797848cf 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md @@ -101,7 +101,7 @@ Workload metrics display the hardware utilization for a Kubernetes workload. You 1. From the **Global** view, navigate to the project that you want to view workload metrics. -1. Select **Workloads > Workloads** in the navigation bar. +1. From the main navigation bar, choose **Resources > Workloads.** In versions prior to v2.3.0, choose **Workloads** on the main navigation bar. 1. Select a specific workload and click on its name. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/_index.md index 4b52c207fae..a667264c69c 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/expression/_index.md @@ -238,8 +238,8 @@ weight: 4 | Catalog | Expression | | --- | --- | - | Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accpeted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| - | Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accpeted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| + | Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| + | Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| - **Ingress Controller Request Process Time** diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md index 6880c8aec52..28ccf295c9b 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md @@ -15,9 +15,9 @@ Rancher's dashboards are available at multiple locations: - **Cluster Dashboard**: From the **Global** view, navigate to the cluster. - **Node Metrics**: From the **Global** view, navigate to the cluster. Select **Nodes**. Find the individual node and click on its name. Click **Node Metrics.** -- **Workload Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Click **Workload Metrics.** +- **Workload Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Click **Workload Metrics.** - **Pod Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Click **Pod Metrics.** -- **Container Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** +- **Container Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** Prometheus metrics are displayed and are denoted with the Grafana icon. If you click on the icon, the metrics will open a new tab in Grafana. @@ -41,19 +41,20 @@ Grafana allows you to query, visualize, alert, and ultimately, understand your c Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. -### Accessing Grafana from the Grafana Instance +When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. If you log out and log in again, you will be prompted to change your password. You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. -1. From the **Global** view, navigate to the cluster that you want to access Grafana. +### Accessing the Cluster-level Grafana Instance -1. From the main navigation bar, choose **Apps**. In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. +1. 
From the **Global** view, navigate to a cluster that has monitoring enabled. -1. Find the application based on what level of metrics you want to view: +1. Go to the **System** project view. This project is where the cluster-level Grafana instance runs. - - **Cluster Level**: Find the `cluster-monitoring` application. - - **Project Level**: Find the `project-monitoring` application. +1. Click **Apps.** In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. -1. Click the `/index.html` link. You will be redirected to a new webpage for Grafana, which shows metrics for either the cluster or project depending on which application you selected. +1. Go to the `cluster-monitoring` application. -1. Sign in to Grafana. The default username is `admin` and the default password is `admin`. For security, Rancher recommends changing the default password after logging in. +1. In the `cluster-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. -**Results:** You will be logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. +1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. + +**Results:** You are logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md index aec464ab46e..59a82734bd9 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md @@ -65,6 +65,7 @@ _Available as of v2.2.0_ 1. Select the **Recipient Type** and then enter a corresponding id to **Default Recipient** field, for example, the party id, tag id or user account that you want to receive the notification. You could get contact information from [Contacts page](https://work.weixin.qq.com/wework_admin/frame#contacts). {{% /accordion %}} +1. _Available as of v2.3.0_ - Select **Enable** for **Send Resolved Alerts** if you wish to notify about resolved alerts. 1. Click **Add** to complete adding the notifier. **Result:** Your notifier is added to Rancher. 
diff --git a/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md b/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md new file mode 100644 index 00000000000..2d7f62ea392 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/upgrading-kubernetes/_index.md @@ -0,0 +1,24 @@ +--- +title: Upgrading Kubernetes +weight: 70 +--- + +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) + +Following an upgrade to the latest version of Rancher, you can update your existing clusters to use the latest supported version of Kubernetes. + +Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For example, Rancher v2.3.0 was tested with Kubernetes v1.15.4, v1.14.7, and v1.13.11. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.3.0/) + +As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata) + +>**Recommended:** Before upgrading Kubernetes, [back up your cluster]({{< baseurl >}}/rancher/v2.x/en/backups). + +1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **Vertical Ellipsis (...) > Edit**. + +1. Expand **Cluster Options**. + +1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. + +1. Click **Save**. + +**Result:** Kubernetes begins upgrading for the cluster. During the upgrade, your cluster is unavailable. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md index 90c22786be7..c2b87d6095c 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md @@ -1,209 +1,58 @@ --- -title: Volumes and Storage +title: "Kubernetes Persistent Storage: Volumes and Storage Classes" +description: "Learn about the two ways you can create persistent storage in Kubernetes: persistent volumes and storage classes" weight: 2031 aliases: - - /rancher/v2.x/en/concepts/volumes-and-storage/ - /rancher/v2.x/en/tasks/clusters/adding-storage/ - - /rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ --- When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. -There are two ways to create persistent storage in Kubernetes: Persistent Volumes (PVs) and Storage Classes. +The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes.
For more information, refer to the section on [how storage works.](./how-storage-works) -## Persistent Volumes +### Prerequisites -_Persistent Volumes_ are pre-provisioned storage volumes that you can bind to pods later. Each pre-provisioned volume corresponds to a Kubernetes persistent volume. When you start your application, it creates Persistent Volume Claims (PVCs) that bind to persistent volumes. A PVC corresponds to a Docker volume. Each PVC binds to one PV that includes the minimum resources that the PVC requires. The following figure illustrates the relationship between pods, PVCs, PVs, and the underlying cloud storage. +To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. -![Persistent Volumes]({{< baseurl >}}/img/rancher/persistent-volume.png) +If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -Rancher allows you to create PVs at the cluster level and bind them to PVCs later. Volumes are managed on a per-project basis. +For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -## Storage Classes +For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. -Storage Classes allow you to create PVCs dynamically without having to create PVs first. For example, an Amazon EBS Storage Class will dynamically create EBS volumes and bind them to PVCs. A Storage Class is similar to the notion of a _storage driver_. The following figure illustrates how a PVC creation triggers the dynamic provisioning of an underlying EBS volume. +### Setting up Existing Storage -![Storage Classes]({{< baseurl >}}/img/rancher/storage-classes.png) +The overall workflow for setting up existing storage is as follows: +1. Set up persistent storage in an infrastructure provider. +2. Add a persistent volume (PV) that refers to the persistent storage. +3. Add a persistent volume claim (PVC) that refers to the PV. +4. Mount the PVC as a volume in your workload. -### Storage and Cloud Providers +For details and prerequisites, refer to [this page.](./attaching-existing-storage) -When you provision persistent storage for a cluster [launched by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), you must host your storage with the same provider that hosts the cluster. For example, if you're hosting your cluster on Amazon EC2, you must host your storage on Amazon EBS. To setup storage for your RKE-launched cluster, you must complete two tasks: enabling the **Cloud Provider** option for your cluster, and adding storage using the same provider. +### Dynamically Provisioning New Storage in Rancher -Enabling Cloud Provider Option/Choosing Storage Provider -![Cloud Provider]({{< baseurl >}}/img/rancher/cloud-provider.png) +The overall workflow for provisioning new storage is as follows: -Before you set up storage for a cluster launched by RKE, make sure that the **Cloud Provider** option for the cluster is enabled. [Cloud providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) are modules that allow you to use the provider's features in Rancher (like provisioning persistent storage). +1. Add a storage class and configure it to use your storage provider. +2. 
Add a persistent volume claim (PVC) that refers to the storage class. +3. Mount the PVC as a volume for your workload. -You can turn on the **Cloud Provider** option in one of two contexts: +For details and prerequisites, refer to [this page.](./provisioning-new-storage) -- [When provisioning your cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning) -- [When editing your cluster]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters) +### Provisioning Storage Examples -When you begin setting up a [persistent volume](#adding-persistent-volumes) or [storage class](#adding-storage-classes), you can choose the storage plugin or provisioner for your cloud provider. +We provide examples of how to provision storage with [NFS,](./examples/nfs) [vSphere,](./examples/vsphere) and [Amazon's EBS.](./examples/ebs) -#### Storage Classes and Cloud Providers +### GlusterFS Volumes -Additionally, storage classes feature a few extra settings for cloud providers. +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](./glusterfs-volumes) -Each storage class contains the fields `provisioner`, `parameters`, and `reclaimPolicy`, which are used when a persistent volume that belongs to the class needs to be dynamically provisioned. +### iSCSI Volumes -The `provisioner` determines which volume plugin is used to provision the persistent volumes. +In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](./iscsi-volumes) -{{% accordion id="provisioners" label="Supported Storage Class Provisioners" %}} -- Amazon EBS Disk -- AzureFile -- AzureDisk -- Ceph RBD -- Gluster Volume -- Google Persistent Disk -- Longhorn -- Openstack Cinder Volume -- Portworx Volume -- Quobyte Volume -- ScaleIO Volume -- StorageOS -- Vmware vSphere Volume -{{% /accordion %}} -
- -In addition to customizing each provisioner's options for the storage class, you can also define the volume `reclaimPolicy`. There are two options available: - -- Delete volumes and underlying device when released by workloads. -- Retain the volume for manual cleanup. - -Finally, you can define custom `MountOptions` for the persistent volume created. - -`parameters` are specific to each cloud storage provisioner. For full information about the storage classes provisioner parameters, refer to the official [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). - -### Adding Persistent Volumes - -Your containers can store data on themselves, but if a container fails, that data is lost. To solve this issue, Kubernetes offers _persistent volumes_, which are external storage disks or file systems that your containers can access. If a container crashes, its replacement container can access the data in a persistent volume without any data loss. - -Persistent volumes can either be a disk or file system that you host on premise, or they can be hosted by a vendor, such as Amazon EBS or Azure Disk. - ->**Prerequisites:** -> ->- Permissions: `Manage Volumes` [role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) ->- You must have [storage provisioned]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/). ->- If provisioning storage for a cluster hosted in the cloud: -> -> - The storage and cluster hosts must be the [same provider](#storage-and-cloud-providers). -> - The [cloud providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) option must be enabled. - - -1. From the **Global** view, open the cluster running the containers that you want to add persistent volume storage to. - -1. From the main menu, select **Storage > Persistent Volumes**. - -1. Click **Add Volume**. - -1. Enter a **Name** for the persistent volume. - -1. Select the **Volume Plugin** for the disk type or service that you're using. - - >**Note:** When adding storage to a cluster that's hosted by a cloud provider: - > - >- You must enable the [cloud provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) option for the cluster. - >- You must use the cloud provider's plug-in for cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it: - > - > - You must enable the `cloud provider` option for the EC2 cluster. - > - You must use the `Amazon EBS Disk` volume plugin. - -1. Enter the **Capacity** of your volume in gigabytes. - -1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor of disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. - -1. **Optional:** Complete the **Customize** form. This form features: - - - [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes): - - This options sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. 
- - - [Mount Options](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options): - - Each volume plugin allows you to specify additional command line options during the mounting process. You can enter these options in the **Mount Option** fields. Consult each plugin's vendor documentation for the mount options available. - - - **Assign to Storage Class:** - - If you later want to automatically provision persistent volumes identical to the volume that you've specified here, assign it a storage class. Later, when you create a workload, you can assign it a persistent volume claim that references the storage class, which will provision a persistent volume identical to the volume you've specified here. - - >**Note:** You must [add a storage class](#adding-storage-classes) before you can assign it to a persistent volume. - -1. Click **Save**. - -**Result:** Your new persistent volume is created. - - -### Adding Storage Classes - -_Storage Classes_ allow you to dynamically provision persistent volumes on demand. Think of storage classes as storage profiles that are created automatically upon a request (which is known as a _persistent volume claim_). - ->**Prerequisites:** -> ->- Permissions: `Manage Volumes` [role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) ->- You must have [storage provisioned]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/). ->- If provisioning storage for a cluster hosted in the cloud: -> -> - The storage and cluster hosts must be the [same provider](#storage-and-cloud-providers). -> - The [cloud providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) option must be enabled. - -1. From the **Global** view, open the cluster for which you want to dynamically provision persistent storage volumes. - -1. From the main menu, select `Storage > Storage Classes`. Click `Add Class`. - -1. Enter a `Name` for your storage class. - -1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. - - >**Note:** If the cluster you are adding a storage class for is a cloud service that also offers cloud storage, you must enable the `cloud provider` option for the cluster, and you must use the service's plug-in to use cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it: - > - > - You must enable the `cloud provider` option for the EC2 cluster. - > - You must use the `Amazon EBS Disk` provisioner. - -1. From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. - -1. Click `Save`. - -## iSCSI Volumes With Rancher Launched Kubernetes Clusters - -In [Rancher Launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. 
- -Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. - - -If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: - -| Platform | Package Name | Install Command | -| ------------- | ----------------------- | -------------------------------------- | -| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | -| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | - - -
-After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. - -``` -services: - kubelet: - extra_binds: - - "/etc/iscsi:/etc/iscsi" - - "/sbin/iscsiadm:/sbin/iscsiadm" -``` - - -## What's Next? - -Mount Persistent Volumes to workloads so that your applications can store their data. You can mount a either a manually created Persistent Volumes or a dynamically created Persistent Volume, which is created from a a Storage Class. - -You can mount Persistent Volumes in one of two contexts: - -- During deployment of a workload (recommended if possible). For more information, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). -- Following workload creation. For more information, see [Adding Persistent Volume Claims]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). - -## Related Links +### Related Links - [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md new file mode 100644 index 00000000000..bd1debc8674 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md @@ -0,0 +1,102 @@ +--- +title: Setting up Existing Storage +weight: 1 +--- + +This section describes how to set up existing persistent storage for workloads in Rancher. + +> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) + +To set up storage, follow these steps: + +1. [Set up persistent storage in an infrastructure provider.](#1-set-up-persistent-storage-in-an-infrastructure-provider) +2. [Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) +3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) +4. [Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-storage-claim-as-a-volume-in-your-workload) + +### Prerequisites + +- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +### 1. Set up persistent storage in an infrastructure provider + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. 
Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../examples/vsphere) [NFS,](../examples/nfs) or Amazon's [EBS.](../examples/ebs) + +### 2. Add a persistent volume that refers to the persistent storage + +These steps describe how to set up a persistent volume at the cluster level in Kubernetes. + +1. From the cluster view, select **Storage > Persistent Volumes**. + +1. Click **Add Volume**. + +1. Enter a **Name** for the persistent volume. + +1. Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have an Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. + +1. Enter the **Capacity** of your volume in gigabytes. + +1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor or disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. + +1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This option sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. + +1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. + +1. Click **Save**. + +**Result:** Your new persistent volume is created. + +### 3. Add a persistent volume claim that refers to the persistent volume + +These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. + +1. Go to the project containing a workload that you want to add a persistent volume claim to. + +1. Click the **Volumes** tab and click **Add Volume**. (In versions prior to v2.3.0, click **Workloads** on the main navigation bar, then **Volumes.**) + +1. Enter a **Name** for the volume claim. + +1. Select the [Namespace]({{}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the workload that you want to add the persistent storage to. + +1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. + +1. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. + +1. Click **Create.** + +**Result:** Your PVC is created. You can now attach it to any workload in the project. + +### 4. Mount the persistent volume claim as a volume in your workload + +Mount PVCs to stateful workloads so that your applications can store their data. + +You can mount PVCs during the deployment of a workload, or following workload creation.
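The Rancher forms in steps 2 and 3 above map onto plain Kubernetes objects. As a rough sketch, a pre-provisioned persistent volume and a claim that binds to it might look like the following manifests. The NFS server, object names, capacity, and access mode are placeholders, not values from this documentation:

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: existing-storage-pv            # illustrative name
spec:
  capacity:
    storage: 10Gi                      # must be at least the size the claim requests
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:                                 # assumes storage already exists on an NFS server
    server: nfs.example.com
    path: /exports/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: existing-storage-pvc
  namespace: default                   # use the namespace where the workload will run
spec:
  storageClassName: ""                 # empty class avoids dynamic provisioning, so the claim binds to an existing PV
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```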
+ +The following steps describe how to assign existing storage to a new workload that is a stateful set: + +1. From the **Project** view, go to the **Workloads** tab. +1. Click **Deploy.** +1. Enter a name for the workload. +1. Next to the **Workload Type** field, click **More Options.** +1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. +1. Choose the namespace where the workload will be deployed. +1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. +1. In the **Persistent Volume Claim** field, select the PVC that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch.** + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +The following steps describe how to assign persistent storage to an existing workload: + +1. From the **Project** view, go to the **Workloads** tab. +1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **Ellipsis (...) > Edit.** +1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. +1. In the **Persistent Volume Claim** field, select the PVC that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save.** + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md new file mode 100644 index 00000000000..5eaa2de4859 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md @@ -0,0 +1,16 @@ +--- +title: Creating Persistent Storage in Amazon's EBS +weight: 3053 +--- + +This section describes how to set up Amazon's Elastic Block Store in EC2. + +1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.** +1. Click **Create Volume.** +1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. +1. Click **Create Volume.** +1. Click **Close.** + +**Result:** Persistent storage has been created. 
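If you would rather register the newly created EBS volume directly as a Kubernetes object instead of through the Rancher UI, a persistent volume backed by that EBS volume might look roughly like the sketch below. The name, size, and volume ID are placeholders; substitute the values shown in the EC2 console:

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ebs-data-pv                    # illustrative name
spec:
  capacity:
    storage: 10Gi                      # match the size of the EBS volume you created
  accessModes:
    - ReadWriteOnce                    # an EBS volume attaches to a single node at a time
  persistentVolumeReclaimPolicy: Retain
  awsElasticBlockStore:
    volumeID: vol-0123456789abcdef0    # placeholder; copy the volume ID from the EC2 console
    fsType: ext4
```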
+ +For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.](../attaching-existing-storage) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md index 6383ba367f3..c91713c4bb0 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md @@ -3,16 +3,15 @@ title: NFS Storage weight: 3054 aliases: - /rancher/v2.x/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ - - /rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/nfs/ --- Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. >**Note:** > ->- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/). +>- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/). > ->- This procedure demonstrates how to setup an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. +>- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. >**Recommended:** To simplify the process of managing firewall rules, use NFSv4. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md index ee192cf9b17..8fcc55db032 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md @@ -3,7 +3,6 @@ title: vSphere Storage weight: 3055 aliases: - /rancher/v2.x/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ - - /rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/vsphere/ --- To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume [storage class]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#storage-classes). This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). @@ -23,7 +22,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 3. Enter a **Name** for the class. 4. Under **Provisioner**, select **VMWare vSphere Volume**. - ![vsphere-storage-class]({{< baseurl >}}/img/rancher/vsphere-storage-class.png) + {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} 5. 
Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. 5. Click **Save**. @@ -37,12 +36,12 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. 6. Enter the required **Capacity** for the volume. Then click **Define**. - ![workload-add-volume]({{< baseurl >}}/img/rancher/workload-add-volume.png) + {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} 7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. 8. Click **Launch** to create the workload. -### Verifing Persistence of the Volume +### Verifying Persistence of the Volume 1. From the context menu of the workload you just created, click **Execute Shell**. 2. Note the directory at root where the volume has been mounted to (in this case `/persistent`). @@ -50,7 +49,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 4. **Close** the shell window. 5. Click on the name of the workload to reveal detail information. 6. Open the context menu next to the Pod in the *Running* state. -7. Delete the Pod by selecting **Delete**. +7. Delete the Pod by selecting **Delete**. 8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. 9. Once the replacement pod is running, click **Execute Shell**. 10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md new file mode 100644 index 00000000000..c9e99f6822d --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md @@ -0,0 +1,32 @@ +--- +title: GlusterFS Volumes +weight: 5000 +--- + +> This section only applies to [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The logging of the `kubelet` will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. There are two requirements before you can change the cluster configuration: + +- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) +- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) + +``` +docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version +``` + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. 
If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +``` +services: + kubelet: + extra_binds: + - "/usr/bin/systemd-run:/usr/bin/systemd-run" +``` + +After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: + +``` +Detected OS with systemd +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md new file mode 100644 index 00000000000..a67c767cadd --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md @@ -0,0 +1,78 @@ +--- +title: How Persistent Storage Works +weight: 1 +aliases: + - /rancher/v2.x/en/tasks/workloads/add-persistent-volume-claim +--- + +A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. + +There are two ways to use persistent storage in Kubernetes: + +- Use an existing persistent volume +- Dynamically provision new persistent volumes + +To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. + +For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. + +![Setting Up New and Existing Persistent Storage]({{< baseurl >}}/img/rancher/rancher-storage.svg) + +For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) + +This section covers the following topics: + +- [About persistent volume claims](#about-persistent-volume-claims) + - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) +- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) + - [Binding PVs to PVCs](#binding-pvs-to-pvcs) +- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) + +# About Persistent Volume Claims + +Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workloads as a volume so that the workload can claim its specified share of the persistent storage. + +To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployment application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. + +Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** (In versions prior to v2.3.0, the PVCs are in the **Volumes** tab.) You can reuse these PVCs when creating deployments in the future. 
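As a concrete illustration of a PVC being mounted as a volume, a minimal pod spec might look like the following sketch. The claim name, image, and mount path are illustrative and assume a claim that already exists in the namespace:

```
apiVersion: v1
kind: Pod
metadata:
  name: app-with-storage
spec:
  containers:
    - name: app
      image: nginx                     # illustrative image
      volumeMounts:
        - name: data
          mountPath: /persistent       # path the container uses to read and write its data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: my-data-pvc         # the PVC that grants access to the persistent storage
```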
+ +### PVCs are Required for Both New and Existing Persistent Storage + +A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. + +If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. + +If a workload should request new storage, the workload mounts PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. + +Rancher lets you create as many PVCs within a project as you'd like. + +You can mount PVCs to a deployment as you create it, or later, after the deployment is running. + +# Setting up Existing Storage with a PVC and PV + +Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. + +PVs can represent a physical disk or file system that you host on premise, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. + +### Binding PVs to PVCs + +When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + +> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. + +In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a sufficient PVs that has at least the amount of disk space required by the PVC. + +To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. + +# Provisioning New Storage with a PVC and Storage Class + +Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. + +For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. + +The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. 
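To make the dynamic provisioning flow concrete, a storage class and a claim that consumes it might look like the sketch below. It assumes the in-tree AWS EBS provisioner is available in the cluster; the names, volume type, and size are illustrative:

```
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-gp2
provisioner: kubernetes.io/aws-ebs     # in-tree AWS EBS provisioner
parameters:
  type: gp2                            # EBS volume type to create
reclaimPolicy: Delete                  # delete the underlying EBS volume when the claim is released
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dynamic-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-gp2            # referencing the class triggers creation of a new PV and EBS volume
  resources:
    requests:
      storage: 20Gi
```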
+ diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md new file mode 100644 index 00000000000..0672bbbf6ee --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md @@ -0,0 +1,30 @@ +--- +title: iSCSI Volumes +weight: 6000 +--- + +In [Rancher Launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. + +Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. + +If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: + +| Platform | Package Name | Install Command | +| ------------- | ----------------------- | -------------------------------------- | +| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | +| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | + + +After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +``` +services: + kubelet: + extra_binds: + - "/etc/iscsi:/etc/iscsi" + - "/sbin/iscsiadm:/sbin/iscsiadm" +``` diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md deleted file mode 100644 index 23f3a45aabb..00000000000 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Persistent Volume Claims -weight: 3052 -aliases: - - /rancher/v2.x/en/tasks/workloads/add-persistent-volume-claim - - /rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/ ---- - -_Persistent Volume Claims_ (or PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. When you create a deployment, you should usually attach a PVC so that your application can lay claim to persistent storage. 
This claim lets your deployment application store its data in an external location, so that if one of the application's containers fails, it can be replaced with a new container and continue accessing its data stored externally, as though an outage never occurred. - -- Rancher lets you create as many PVCs within a project as you'd like. -- You can mount PVCs to a deployment as you create it, or later after its running. -- Each Rancher project contains a list of PVCs that you've created, available from the **Volumes** tab. You can reuse these PVCs when creating deployments in the future. - ->**Prerequisite:** -> You must have a pre-provisioned [persistent volume]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-a-persistent-volume) available for use, or you must have a [storage class created]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-storage-classes) that dynamically creates a volume upon request from the workload. - -1. From the **Global** view, open the project containing a workload that you want to add a PVC to. - -1. From the main menu, make sure that **Workloads** is selected. Then select the **Volumes** tab. Click **Add Volume**. - -1. Enter a **Name** for the volume claim. - -1. Select the [Namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the volume claim. - -1. Select a **Source** option: - - - **To dynamically provision a storage volume for the deployment:** - - 1. Choose **Use a Storage Class to provision a new persistent volume** - - 1. From the **Storage Class** drop-down, choose a pre-created storage class. - - 1. Enter a volume **Capacity**. - - - **To use an existing persistent volume:** - - 1. Choose **Use an existing persistent volume:** - - 1. From the **Persistent Volume** drop-down, choose a pre-created persistent volume. - -7. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -**Result:** Your PVC is created. You can now attach it to any workload in the project. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md new file mode 100644 index 00000000000..05ecaf4f436 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md @@ -0,0 +1,109 @@ +--- +title: Dynamically Provisioning New Storage in Rancher +weight: 2 +--- + +This section describes how to provision new persistent storage for workloads in Rancher. + +> This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) + +To provision new storage for your workloads, follow these steps: + +1. [Add a storage class and configure it to use your storage provider.](#1-add-a-storage-class-and-configure-it-to-use-your-storage-provider) +2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) +3. 
[Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) + +### Prerequisites + +- To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. +- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) +- Make sure your storage provisioner is available to be enabled. + +The following storage provisioners are enabled by default: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/) + +### 1. Add a storage class and configure it to use your storage provider + +These steps describe how to set up a storage class at the cluster level. + +1. Go to the cluster for which you want to dynamically provision persistent storage volumes. + +1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. + +1. Enter a `Name` for your storage class. + +1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. + +1. From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. + +1. Click `Save`. + +**Result:** The storage class is available to be consumed by a PVC. + +For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). + +### 2. Add a persistent volume claim that refers to the storage class + +These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. + +1. Go to the project containing a workload that you want to add a PVC to. + +1. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Then select the **Volumes** tab. Click **Add Volume**. + +1. Enter a **Name** for the volume claim. + +1. Select the [Namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces) of the volume claim. + +1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** + +1. Go to the **Storage Class** drop-down and select the storage class that you created. + +1. Enter a volume **Capacity**. + +1. Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. + +1. 
Click **Create.** + +**Result:** Your PVC is created. You can now attach it to any workload in the project. + +### 3. Mount the persistent volume claim as a volume for your workload + +Mount PVCs to workloads so that your applications can store their data. + +You can mount PVCs during the deployment of a workload, or following workload creation. + +To attach the PVC to a new workload, + +1. Create a workload as you would in [Deploying Workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). +1. For **Workload Type**, select **Stateful set of 1 pod**. +1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** +1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch.** + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +To attach the PVC to an existing workload, + +1. Go to the project that has the workload that will have the PVC attached. +1. Go to the workload that will have persistent storage and click **Ellipsis (...) > Edit.** +1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** +1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save.** + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/_index.md b/content/rancher/v2.x/en/cluster-provisioning/_index.md index 65e758ab1d9..ad6df2689bd 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/_index.md @@ -1,95 +1,81 @@ --- -title: Provisioning Kubernetes Clusters +title: Setting up Kubernetes Clusters in Rancher +description: Provisioning Kubernetes Clusters weight: 2000 aliases: - /rancher/v2.x/en/concepts/clusters/ - /rancher/v2.x/en/concepts/clusters/cluster-providers/ - /rancher/v2.x/en/tasks/clusters/ - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/ --- -## What's a Kubernetes Cluster? +Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. -A cluster is a group of computers that work together as a single system. +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. -A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. 
+For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.x/en/overview/architecture/) page. -### Kubernetes Cluster Node Components - -Each computing resource in a Kubernetes Cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. - -#### etcd Nodes - -[etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) nodes run the etcd database. The etcd database component is a key value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. - -etcd is a distributed key value store, meaning it runs on multiple nodes so that there's always a backup available for fail over. Even though you can run etcd on a single node, you should run it on multiple nodes. We recommend 3, 5, or 7 etcd nodes for redundancy. - -#### Control Plane Nodes - -[Control plane](https://kubernetes.io/docs/concepts/#kubernetes-control-plane) nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although two or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. - -#### Worker Nodes - -[Worker nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) run: - -- _Kubelets_: An agent that monitors the state of the node, ensuring your containers are healthy. -- _Workloads_: The containers and pods that hold your apps, as well as other types of deployments. - -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your workloads. - -## Cluster Creation in Rancher - -Now that you know what a Kubernetes Cluster is, how does Rancher fit in? - -Rancher simplifies creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. - -![Rancher diagram]({{< baseurl >}}/img/rancher/ranchercomponentsdiagram.svg)
-Rancher components used for provisioning/managing Kubernetes clusters. - - -## Cluster Creation Options - -Options include: +This section covers the following topics: - -- [Hosted Kubernetes Cluster](#hosted-kubernetes-cluster) -- [Rancher Launched Kubernetes](#rancher-launched-kubernetes) - - [Nodes Hosted by an Infrastructure Provider](#nodes-hosted-by-an-infrastructure-provider) - - [Custom Nodes](#custom-nodes) -- [Import Existing Cluster](#import-existing-cluster) - +- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-cluster) +- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) + - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) + - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) +- [Importing Existing Cluster](#importing-existing-cluster) -### Hosted Kubernetes Cluster +The following table summarizes the options and settings available for each cluster type: -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage a hosted cluster from the Rancher UI. + Rancher Capability | RKE Launched | Hosted Kubernetes Cluster | Imported Cluster + ---------|----------|---------|---------| + Manage member roles | ✓ | ✓ | ✓ + Edit cluster options | ✓ | | + Manage node pools | ✓ | | -[Hosted Kubernetes Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters) +# Setting up Clusters in a Hosted Kubernetes Provider -### Rancher Launched Kubernetes +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. -Alternatively, you can use Rancher to create a cluster on your own nodes, using [Rancher Kubernetes Engine (RKE)]({{< baseurl >}}/rke/latest/en/). RKE is Rancher’s own lightweight Kubernetes installer. In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. These nodes can either: +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. -- Be provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. -- Be a prior existing node that's brought into the cluster by running a Rancher agent container on it. +For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters) -[Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) +# Launching Kubernetes with Rancher -#### Nodes Hosted by an Infrastructure Provider +Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/)as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. -Using Rancher, you can create pools of nodes based on a [node template]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. 
The cloud providers available for creating a node template are decided based on the [node drivers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. The benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher automatically replaces it, thus maintaining the expected cluster configuration. +In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. -[Nodes Hosted by an Infrastructure Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) +These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. -#### Custom Nodes +If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. -You can bring any nodes you want to Rancher and use them to create a cluster. These nodes include on-premise bare metal servers, cloud-hosted virtual machines, or on-premise virtual machines. +For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) -[Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) +### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider -### Import Existing Cluster +Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. -Users can import an existing Kubernetes cluster into Rancher. Note that Rancher does not automate the provisioning, scaling, or upgrade of imported clusters. All other Rancher features, including management of cluster, policy, and workloads, are available for imported clusters. +Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. -[Importing Existing Cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) +One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. + +The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. + +For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) + +### Launching Kubernetes on Existing Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. + +You can bring any nodes you want to Rancher and use them to create a cluster. + +These nodes include on-premise bare metal servers, cloud-hosted virtual machines, or on-premise virtual machines. + +# Importing Existing Clusters + +In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. 
Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. + +Note that Rancher does not automate the provisioning, scaling, or upgrade of imported clusters. All other Rancher features, including management of cluster, policy, and workloads, are available for imported clusters. + +For more information, refer to the section on [importing existing clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/) diff --git a/content/rancher/v2.x/en/cluster-provisioning/custom-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/custom-clusters/_index.md deleted file mode 100644 index 84c184fc6fc..00000000000 --- a/content/rancher/v2.x/en/cluster-provisioning/custom-clusters/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Custom Cluster -weight: 2210 ---- - -If you don't want to host your Kubernetes cluster in a [hosted kubernetes provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters) or provision nodes through Rancher, you can use the _custom cluster_ option to create a Kubernetes cluster in on-premise bare-metal servers, on-premise virtual machines, or in _any_ node hosted by an infrastructure provider. - -In this scenario, you'll bring the nodes yourself, and then configure them to meet Rancher's [requirements]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#requirements). Then, use the [Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) install option to setup your cluster. diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md index 69f0250ac1f..8a5fc2495de 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md @@ -1,11 +1,13 @@ --- -title: Hosted Kubernetes Providers +title: Setting up Clusters from Hosted Kubernetes Providers weight: 2100 --- -You can use Rancher to create clusters in a hosted Kubernetes provider, such as Google GKE. +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. -In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-premise or in an infrastructure provider, all from the same UI. +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-premise or in an infrastructure provider. 
Rancher supports the following Kubernetes providers: diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md index 1a993231f66..39bb5c1c44b 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md @@ -50,6 +50,8 @@ Huawei CCE service doesn't support the ability to create clusters with public ac | Cluster Label | The labels for the cluster. | | Highway Subnet | This option is only supported in `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | + **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + 7. Fill the following node configuration of the cluster: |Settings|Description| diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md index ad9c0b4bbde..c3f8087e741 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md @@ -48,6 +48,8 @@ You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster, you may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Default to 172.16.0.0/16. | + **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + 7. Click `Next: Select Instance Type` to choose the instance type that will use for your TKE cluster. | Option | Description | diff --git a/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md index 3659965b958..e1cf1478588 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/imported-clusters/_index.md @@ -1,35 +1,43 @@ --- -title: Importing Kubernetes Clusters +title: Importing Existing Clusters into Rancher +description: Learn how you can create a cluster in Rancher by importing an existing Kubernetes cluster. 
Then, you can manage it using Rancher +metaTitle: "Kubernetes Cluster Management" +metaDescription: "Learn how you can import an existing Kubernetes cluster and then manage it using Rancher" weight: 2300 aliases: - /rancher/v2.x/en/tasks/clusters/import-cluster/ --- -You can import an existing Kubernetes cluster and then manage it using Rancher. Keep in mind that editing your Kubernetes cluster (for example: adding/removing nodes, upgrading Kubernetes cluster version and changing Kubernetes component parameters) still has to be done outside of Rancher. +When managing an imported cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. ->**Prerequisites:** -> ->- If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to import the cluster into Rancher. In order to apply the privilege, you need to run `kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user [USER_ACCOUNT]` before running the `kubectl` command to import the cluster. ->- By default, GKE users are not given this privilege, so you will need to run the command before importing GKE clusters. To learn more about GKE RBAC, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). +Keep in mind that editing your Kubernetes cluster still has to be done outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. + +### Prerequisites + +If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to import the cluster into Rancher. + +In order to apply the privilege, you need to run: + +```plain +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user [USER_ACCOUNT] +``` +before running the `kubectl` command to import the cluster. + +By default, GKE users are not given this privilege, so you will need to run the command before importing GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). + +### Importing a Cluster 1. From the **Clusters** page, click **Add Cluster**. - 2. Choose **Import**. - 3. Enter a **Cluster Name**. - 4. {{< step_create-cluster_member-roles >}} - 5. Click **Create**. - 6. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. - 7. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in {{< product >}}. - 8. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in {{< product >}} to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. - 9. When you finish running the command(s) on your node, click **Done**. 
- {{< result_import-cluster >}} > **Note:** diff --git a/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md new file mode 100644 index 00000000000..24a0777534d --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md @@ -0,0 +1,206 @@ +--- +title: Node Requirements for User Clusters +weight: 1 +--- + +This page describes the requirements for the nodes where your apps and services will be installed. + +In this section, "user cluster" refers to a cluster running your apps, which should be separate from the cluster (or single node) running Rancher. + +> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server cluster and user clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.x/en/installation/requirements/) + +Make sure the nodes for your user clusters fulfill the following requirements: + +- [Operating systems and Docker requirements](#operating-systems-and-docker-requirements) +- [Hardware Requirements](#hardware-requirements) +- [Networking Requirements](#networking-requirements) +- [Optional: Security Considerations](#optional-security-considerations) + +# Operating Systems and Docker Requirements + +Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#requirements-for-windows-nodes) The capability to use Windows worker nodes in downstream clusters was added in Rancher v2.3.0. + +Rancher has been tested and is supported with downstream clusters running Ubuntu, CentOS, Oracle Linux, RancherOS, and RedHat Enterprise Linux. For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.x/en/installation/options/arm64-platform/) + +For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) + +Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. This [how-to guide]({{}}/rancher/v2.x/en/installation/options/firewall) shows how to check the default firewall rules and how to open the ports with `firewalld` if necessary. + +SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps](#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. + +### Requirements for Windows Nodes + +_Windows worker nodes can be used as of Rancher v2.3.0_ + +Nodes with Windows Server must run Docker Enterprise Edition. + +Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) + +# Hardware Requirements + +The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory.
+ +Regarding CPU and memory, we recommend hosting the different planes of Kubernetes clusters (etcd, controlplane, and workers) on different nodes so that they can scale separately from each other. + +For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) + +For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) + +# Networking Requirements + +For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. + +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below lists the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options). + +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) + +Details on which ports are used in each situation are found in the following sections: + +- [Commonly used ports](#commonly-used-ports) +- [Port requirements for custom clusters](#port-requirements-for-custom-clusters) +- [Port requirements for clusters hosted by an infrastructure provider](#port-requirements-for-clusters-hosted-by-an-infrastructure-provider) + - [Security group for nodes on AWS EC2](#security-group-for-nodes-on-aws-ec2) +- [Port requirements for clusters hosted by a Kubernetes provider](#port-requirements-for-clusters-hosted-by-a-kubernetes-provider) +- [Port requirements for imported clusters](#port-requirements-for-imported-clusters) +- [Port requirements for local traffic](#port-requirements-for-local-traffic) + +### Commonly Used Ports + +If security isn't a large concern and you're okay with opening a few additional ports, you can use this table as your port reference instead of the comprehensive tables in the following sections. + +These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. + +{{% accordion id="common-ports" label="Click to Expand" %}} + +
Commonly Used Ports Reference
+ +| Protocol | Port | Description | +|:--------: |:----------------: |------------------------------------------------- | +| TCP | 22 | Node driver SSH provisioning | +| TCP | 2376 | Node driver Docker daemon TLS port | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 6783 | Weave Port | +| UDP | 6783-6784 | Weave UDP Ports | +| TCP | 10250 | kubelet API | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | +| TCP/UDP | 30000-
32767 | NodePort port range | + +{{% /accordion %}} + +### Port Requirements for Custom Clusters + +If you are launching a Kubernetes cluster on your existing infrastructure, refer to these port requirements. + +{{% accordion id="port-reqs-for-custom-clusters" label="Click to Expand" %}} + +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). + +{{< ports-custom-nodes >}} + +{{% /accordion %}} + +### Port Requirements for Clusters Hosted by an Infrastructure Provider + +If you are launching a Kubernetes cluster on nodes that are in an infrastructure provider such as Amazon EC2, Google Container Engine, DigitalOcean, Azure, or vSphere, these port requirements apply. + +These required ports are automatically opened by Rancher during creation of clusters using cloud providers. + +{{% accordion id="port-reqs-for-infrastructure-providers" label="Click to Expand" %}} + +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). + +>**Note:** +>The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. + +{{< ports-iaas-nodes >}} + +{{% /accordion %}} + +#### Security Group for Nodes on AWS EC2 + +When using the [AWS EC2 node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. + +| Type | Protocol | Port Range | Source/Destination | Rule Type | +|-----------------|:--------:|:-----------:|------------------------|:---------:| +| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | +| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | +| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | +| All traffic | All | All | 0.0.0.0/0 | Outbound | + +### Port Requirements for Clusters Hosted by a Kubernetes Provider + +If you are launching a cluster with a hosted Kubernetes provider such as Google Kubernetes Engine, Amazon EKS, or Azure Kubernetes Service, refer to these port requirements. + +{{% accordion id="port-reqs-for-hosted-kubernetes" label="Click to Expand" %}} + +The following table depicts the port requirements for nodes in [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters). + +{{< ports-imported-hosted >}} + +{{% /accordion %}} + +### Port Requirements for Imported Clusters + +If you are importing an existing cluster, refer to these port requirements. 
+ +{{% accordion id="port-reqs-for-imported-clusters" label="Click to Expand" %}} + +The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). + +{{< ports-imported-hosted >}} + +{{% /accordion %}} + +### Port Requirements for Local Traffic + +Ports marked as `local traffic` (i.e., `9099 TCP`) in the port requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). +These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. + +However, this traffic may be blocked when: + +- You have applied strict host firewall policies on the node. +- You are using nodes that have multiple interfaces (multihomed). + +In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes/instances. + +# Optional: Security Considerations + +If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. + +For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.x/en/security/#rancher-hardening-guide) + +# Opening SUSE Linux Ports + +SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, + +1. SSH into the instance. +1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: + ``` + FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" + FW_SERVICES_EXT_UDP="8472 30000:32767" + FW_ROUTE=yes + ``` +1. Restart the firewall with the new ports: + ``` + SuSEfirewall2 + ``` + +**Result:** The node has the open ports required to be added to a custom cluster. + diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md index 7711b93834d..d5ab40db6fd 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md @@ -1,137 +1,50 @@ --- -title: Production Ready Cluster +title: Checklist for Production-Ready Clusters weight: 2005 --- -While Rancher makes it easy to create Kubernetes clusters, a production ready cluster takes more consideration and planning. There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. In the next sections each of the roles will be described in more detail. +In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. -When designing your cluster(s), you have two options: +For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) -* Use dedicated nodes for each role. 
This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/). -* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. +This is a shortlist of best practices that we strongly recommend for all production clusters. ->**Note:** Do not add the `worker` role to any node configured with either the `etcd` or `controlplane` role. This will make the nodes schedulable for regular workloads, which could interfere with critical cluster components running on the nodes with the `etcd` or `controlplane` role. +For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices) -## etcd +### Node Requirements -Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. +* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) including the port requirements. ->**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. +### Back up etcd -### Hardware Requirements +* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{}}/rancher/v2.x/en/backups/backups/ha-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. -Please see [Kubernetes: Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/) and [etcd: Hardware Recommendations](https://coreos.com/etcd/docs/latest/op-guide/hardware.html) for the hardware requirements. - -### Count of etcd Nodes - -The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones to survive the loss of one availability zone within a region. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. - -| Nodes with `etcd` role | Majority | Failure Tolerance | -|--------------|------------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | **1** | -| 4 | 3 | 1 | -| 5 | 3 | **2** | -| 6 | 4 | 2 | -| 7 | 4 | **3** | -| 8 | 5 | 3 | -| 9 | 5 | **4** | - -References: - -* [etcd cluster size](https://coreos.com/etcd/docs/latest/v2/admin_guide.html#optimal-cluster-size) -* [Operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) - -### Network Latency - -Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These settings allow etcd to run in most networks (except really high latency networks). 
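+
+As a rough sketch, recurring snapshots can also be declared in the cluster's RKE options. The exact nesting depends on how the cluster is managed (for Rancher-provisioned clusters these settings sit under the `rancher_kubernetes_engine_config` directive in `cluster.yml`), and the bucket, region, and credentials below are placeholders for an assumed S3-compatible target:
+
+```yaml
+services:
+  etcd:
+    backup_config:
+      interval_hours: 12    # take a snapshot every 12 hours
+      retention: 6          # keep the 6 most recent snapshots
+      s3backupconfig:       # store snapshots off the node
+        access_key: "<access key>"
+        secret_key: "<secret key>"
+        bucket_name: "etcd-snapshots"
+        region: "us-east-1"
+        endpoint: "s3.amazonaws.com"
+```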
- -References: - -* [etcd Tuning](https://coreos.com/etcd/docs/latest/tuning.html) - -### Backups - -etcd is the location where the state of your cluster is stored. Losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. - -## controlplane - -Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. - ->**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -References: - -* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) - -### Hardware Requirements - -Please see [Kubernetes: Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/) for the hardware requirements. - -### Count of controlplane Nodes - -Adding more than one node with the `controlplane` role makes every master component highly available. See below for a breakdown of how high availability is achieved per component. - -#### kube-apiserver - -The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. - -#### kube-controller-manager - -The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -#### kube-scheduler - -The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -## worker - -Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. - -References: - -* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) - -### Hardware Requirements - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -### Count of worker Nodes - -Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. - -## Networking - -Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. 
If you are using [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). - -## Cluster Diagram - -This diagram is applicable to Kubernetes clusters built using RKE or [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -## Production checklist +### Cluster Architecture * Nodes should have one of the following role configurations: * `etcd` * `controlplane` * `etcd` and `controlplane` * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) -* Network traffic is only strictly allowed according to [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/). * Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. * Assign two or more nodes the `controlplane` role for master component high availability. * Assign two or more nodes the `worker` role for workload rescheduling upon node failure. -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. -* Perform load tests on your cluster to verify that its hardware can support your workloads. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) + +For more information about the +number of nodes for each Kubernetes role, refer to the section on [recommended architecture.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/) + +### Logging and Monitoring + * Configure alerts/notifiers for Kubernetes components (System Service). * Configure logging for cluster analysis and post-mortems. -## RKE cluster running Rancher HA +### Reliability -You may have noticed that our [High Availability (HA) Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, as: +* Perform load tests on your cluster to verify that its hardware can support your workloads. -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. +### Networking + +* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). +* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). 
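+
+If you do need to tune these timeouts for a higher-latency network, one way to pass them to etcd is through `extra_args` on the etcd service in the cluster's RKE options. This is only a sketch showing the default values; verify the exact nesting against your Rancher and RKE versions before applying it:
+
+```yaml
+services:
+  etcd:
+    extra_args:
+      heartbeat-interval: "500"    # milliseconds between leader heartbeats
+      election-timeout: "5000"     # milliseconds a follower waits before starting an election
+```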
diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md new file mode 100644 index 00000000000..da12ee46111 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md @@ -0,0 +1,43 @@ +--- +title: Roles for Nodes in Kubernetes +weight: 1 +--- + +This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. + +This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). + +![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely for visual aid + +# etcd + +Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. + +>**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +# controlplane + +Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. + +>**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +### kube-apiserver + +The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. + +### kube-controller-manager + +The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +### kube-scheduler + +The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +# worker + +Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. + +# References + +* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md new file mode 100644 index 00000000000..8dd5a53dfde --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md @@ -0,0 +1,74 @@ +--- +title: Recommended Cluster Architecture +weight: 1 +--- + +There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. + +# Separating Worker Nodes from Nodes with Other Roles + +When designing your cluster(s), you have two options: + +* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/). +* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. 
+ +In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. + +Therefore, each node should have one of the following role configurations: + + * `etcd` + * `controlplane` + * Both `etcd` and `controlplane` + * `worker` + +# Recommended Number of Nodes with Each Role + +The cluster should have: + +- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +- At least two nodes with the role `controlplane` for master component high availability. +- At least two nodes with the role `worker` for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) + + +### Number of Controlplane Nodes + +Adding more than one node with the `controlplane` role makes every master component highly available. + +### Number of etcd Nodes + +The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. + +| Nodes with `etcd` role | Majority | Failure Tolerance | +|--------------|------------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | **1** | +| 4 | 3 | 1 | +| 5 | 3 | **2** | +| 6 | 4 | 2 | +| 7 | 4 | **3** | +| 8 | 5 | 3 | +| 9 | 5 | **4** | + +References: + +* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) +* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) + +### Number of Worker Nodes + +Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. + +### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications + +You may have noticed that our [Kubernetes Install]({{}}/rancher/v2.x/en/installation/k8s-install/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. 
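+
+To make the recommended role separation concrete, the following sketch shows how such a layout could be declared in an RKE `cluster.yml` when running RKE directly (rather than provisioning through the Rancher UI). The addresses and SSH user are placeholders:
+
+```yaml
+nodes:
+  # Three nodes sharing the etcd and controlplane roles (no worker role)
+  - address: 172.16.0.10
+    user: ubuntu
+    role: [controlplane, etcd]
+  - address: 172.16.0.11
+    user: ubuntu
+    role: [controlplane, etcd]
+  - address: 172.16.0.12
+    user: ubuntu
+    role: [controlplane, etcd]
+  # Two or more dedicated worker nodes for your workloads
+  - address: 172.16.0.20
+    user: ubuntu
+    role: [worker]
+  - address: 172.16.0.21
+    user: ubuntu
+    role: [worker]
+```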
+ +# References + +* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md index 417cdf3c295..111b0a58faa 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md @@ -1,32 +1,34 @@ --- -title: Rancher Launched Kubernetes +title: Launching Kubernetes with Rancher weight: 2200 --- -If you don't want to use a hosted Kubernetes provider, you can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{< baseurl >}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: +You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: - Bare-metal servers - On-premise virtual machines - Virtual machines hosted by an infrastructure provider -RKE launched clusters are separated into two categories: +Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. -- [Nodes Hosted by an Infrastructure Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/): - - Using Rancher, you can create pools of nodes based on a [node template]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. The available cloud providers to create a node template are decided based on active [node drivers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). The benefit of using a node hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher will automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - - As of v2.2.0, [cloud credential]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials) are created to store credentials for launching nodes in your infrastructure providers. There are two benefits of using a cloud credential: - - Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. - - Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. - -- [Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/): - - For use cases where you want to provision bare-metal servers, on-premise virtual machines, or bring virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. 
- - >**Note:** If you want to reuse a node from a previous custom cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -
+RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. ### Requirements -If you use RKE to set up a cluster, your cluster nodes must meet our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). +If you use RKE to set up a cluster, your nodes must meet the [requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) for nodes in downstream user clusters. + +### Launching Kubernetes on New Nodes in an Infrastructure Provider + +Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. + +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +For more information, refer to the section on [launching Kubernetes on new nodes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) + +### Launching Kubernetes on Existing Custom Nodes + +In this scenario, you want to install Kubernetes on bare-metal servers, on-premise virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. + +If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +For more information, refer to the section on [custom nodes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md index df127eaf86e..9835c53a18c 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md @@ -1,20 +1,23 @@ --- -title: Creating a Cluster with Custom Nodes -shortTitle: Custom Nodes +title: Launching Kubernetes on Existing Custom Nodes +description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements +metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" weight: 2225 aliases: - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ --- -## Custom Nodes +When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-premise bare-metal servers, on-premise virtual machines, or in any node hosted by an infrastructure provider. -To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to Rancher [requirements](#requirements), which includes some hardware specifications and Docker. After you install Docker on each server, run the command provided in the Rancher UI to turn each server into a Kubernetes node. 
+To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements), which includes some hardware specifications and Docker. After you install Docker on each server, run the command provided in the Rancher UI to turn each server into a Kubernetes node. -## Objectives for Creating Cluster with Custom Nodes +This section describes how to set up a custom cluster. + +# Creating a Cluster with Custom Nodes >**Want to use Windows hosts as Kubernetes workers?** > ->See [Configuring Custom Clusters for Windows]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) before you start. +>See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) before you start. @@ -24,7 +27,7 @@ To use this option you'll need access to servers you intend to use in your Kuber -## 1. Provision a Linux Host +### 1. Provision a Linux Host Begin creation of a custom cluster by provisioning a Linux host. Your host can be: @@ -32,18 +35,11 @@ Begin creation of a custom cluster by provisioning a Linux host. Your host can b - An on-premise VM - A bare-metal server ->**Notes:** -> ->- While creating your cluster, you must assign Kubernetes roles to your cluster nodes. If you plan on dedicating servers to each role, you must provision a server for each role (i.e. provision multiple servers). ->- If you want to reuse a node from a previous custom cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. +If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. -Provision the host according to the requirements below. +Provision the host according to the [installation requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) and the [checklist for production-ready clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/production) -### Requirements - -Each node in your cluster must meet our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). - -## 2. Create the Custom Cluster +### 2. Create the Custom Cluster 1. From the **Clusters** page, click **Add Cluster**. @@ -55,7 +51,7 @@ Each node in your cluster must meet our [Requirements]({{< baseurl >}}/rancher/v 5. {{< step_create-cluster_cluster-options >}} - >**Using Windows nodes as Kubernetes workers?** + >**Using Windows nodes as Kubernetes workers?** > >- See [Enable the Windows Support Option]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#enable-the-windows-support-option). >- The only Network Provider available for clusters with Windows support is Flannel. See [Networking Option]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#networking-option). @@ -68,10 +64,7 @@ Each node in your cluster must meet our [Requirements]({{< baseurl >}}/rancher/v >- Using Windows nodes as Kubernetes workers? See [Node Configuration]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/#node-configuration). 
>- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). -8. **Optional**: Click **Show advanced options** to specify IP address(es) to use when registering the node, override the hostname of the node or to add labels to the node. - - [Rancher Agent Options]({{< baseurl >}}/rancher/v2.x/en/admin-settings/agent-options/)
- [Kubernetes Documentation: Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +8. **Optional**: Click **[Show advanced options]({{< baseurl >}}/rancher/v2.x/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. 9. Copy the command displayed on screen to your clipboard. @@ -83,7 +76,7 @@ Each node in your cluster must meet our [Requirements]({{< baseurl >}}/rancher/v {{< result_create-cluster >}} -## 3. Amazon Only: Tag Resources +### 3. Amazon Only: Tag Resources If you have configured your cluster to use Amazon as **Cloud Provider**, tag your AWS resources with a cluster ID. @@ -113,3 +106,10 @@ If you share resources between clusters, you can change the tag to: ``` Key=kubernetes.io/cluster/CLUSTERID, Value=shared ``` + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md similarity index 82% rename from content/rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options/_index.md rename to content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md index 925c5df7aff..241369bc24d 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md @@ -1,12 +1,14 @@ --- title: Rancher Agent Options -weight: 1140 +weight: 2500 aliases: - /rancher/v2.x/en/admin-settings/agent-options/ - + - /rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options --- -Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [Create a Cluster with Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) and add the options to the generated `docker run` command when adding a node. +Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. 
To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) and add the options to the generated `docker run` command when adding a node. + +For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture/#3-node-agents) ## General options @@ -16,7 +18,8 @@ Rancher deploys an agent on each node to communicate with the node. This pages d | `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | | `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | | `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | -| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node (`--label key=value`) | +| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | +| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. (`--taints key=value:effect`) | ## Role options diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md index 8e96cf63b2e..0d7a0e5ab67 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/_index.md @@ -1,38 +1,121 @@ --- -title: Nodes Hosted in an Infrastructure Provider +title: Launching Kubernetes on New Nodes in an Infrastructure Provider weight: 2205 aliases: - /rancher/v2.x/en/concepts/global-configuration/node-templates/ --- -## Node Pools +Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. -Using Rancher, you can create pools of nodes based on a [node template](#node-templates). The benefit of using a node pool is that if a node loses connectivity with the cluster, Rancher will automatically create another node to join the cluster to ensure that the count of the node pool is as expected. +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. -Each node pool is assigned with a [node component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#node-components) to specify how these nodes should be configured for the Kubernetes cluster. +The available cloud providers to create a node template are decided based on active [node drivers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). -## Node Templates +This section covers the following topics: -A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. Rancher provides a nice UI to be able to launch these nodes and uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. 
The available cloud providers to create node templates are based on the active node drivers in Rancher. +- [Node templates](#node-templates) + - [Node labels](#node-labels) + - [Node taints](#node-taints) +- [Node pools](#node-pools) + - [Node pool taints](#node-pool-taints) + - [About node auto-replace](#about-node-auto-replace) + - [Enabling node auto-replace](#enabling-node-auto-replace) + - [Disabling node auto-replace](#disabling-node-auto-replace) +- [Cloud credentials](#cloud-credentials) +- [Node drivers](#node-drivers) -After you create a node template in Rancher, it's saved so that you can use this template again to create other node pools. Node templates are bound to your login. After you add a template, you can remove them from your user profile. +# Node Templates -## Cloud Credentials +A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. + +After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove it from your user profile. + +### Node Labels + +You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. + +### Node Taints + +_Available as of Rancher v2.3.0_ + +You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. + +Since taints can be added at both the node template and the node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. + +# Node Pools + +Using Rancher, you can create pools of nodes based on a [node template](#node-templates). The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. + +Each node pool is assigned a [node component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) to specify how these nodes should be configured for the Kubernetes cluster. + +### Node Pool Taints + +_Available as of Rancher v2.3.0_ + +If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at the node pool level rather than at the node template level is that you can swap out node templates without worrying about whether the taint is on the node template. + +Each taint will automatically be added to any node created in the node pool. Therefore, if you add taints to a node pool that has existing nodes, the taints won't apply to the existing nodes in the node pool, but any new node added into the node pool will get the taint. 
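As a purely illustrative example (the key, value, and effect below are hypothetical), a node pool taint such as `app=gpu-workload:NoSchedule` keeps ordinary pods off the nodes in that pool; only workloads that declare a matching toleration will be scheduled there:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-workload-example   # hypothetical workload
spec:
  containers:
    - name: app
      image: nginx             # placeholder image
  tolerations:
    - key: "app"               # matches the taint key set on the node pool
      operator: "Equal"
      value: "gpu-workload"    # matches the taint value
      effect: "NoSchedule"     # matches the taint effect
```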
+ +When there are taints on the node pool and node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. + +### About Node Auto-replace + +_Available as of Rancher v2.3.0_ + +If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. + +> **Important:** Self-healing node pools are designed to help you replace worker nodes for **stateless** applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. + +{{% accordion id="how-does-node-auto-replace-work" label="How does Node Auto-replace Work?" %}} + Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool. +{{% /accordion %}} + +### Enabling Node Auto-replace + +When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. + +1. In the form for creating a cluster, go to the **Node Pools** section. +1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. +1. Fill out the rest of the form for creating a cluster. + +**Result:** Node auto-replace is enabled for the node pool. + +You can also enable node auto-replace after the cluster is created with the following steps: + +1. From the Global view, click the Clusters tab. +1. Go to the cluster where you want to enable node auto-replace, click the vertical ellipsis **(…)**, and click **Edit.** +1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. +1. Click **Save.** + +**Result:** Node auto-replace is enabled for the node pool. + +### Disabling Node Auto-replace + +You can disable node auto-replace from the Rancher UI with the following steps: + +1. From the Global view, click the Clusters tab. +1. Go to the cluster where you want to disable node auto-replace, click the vertical ellipsis **(…)**, and click **Edit.** +1. In the **Node Pools** section, go to the node pool where you want to disable node auto-replace. In the **Recreate Unreachable After** field, enter 0. +1. Click **Save.** + +**Result:** Node auto-replace is disabled for the node pool. 
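If you want to confirm that the node controller has tainted a node as unreachable before Rancher's deletion countdown finishes, you can inspect the node's taints with `kubectl`. The node name below is a placeholder:

```bash
# Prints any taints on the node, such as node.kubernetes.io/unreachable
kubectl get node worker-node-1 -o jsonpath='{.spec.taints}'
```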
+ +# Cloud Credentials _Available as of v2.2.0_ Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: -- Cloud credentials are stored as Kubernetes secrets for security. Credentials are no longer needed to be re-entered any time you want to edit a node template. +- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. - After the cloud credential is created, it can be re-used to create additional node templates. -- When access and secret keys are expired or compromised, the cloud credential can be updated with the new information, which will automatically be updated for all the node templates referencing this cloud credential. +- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. > **Note:** As of v2.2.0, the default `active` [node drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{< baseurl >}}/rancher/v2.x/en/user-settings/cloud-credentials/). -## Node Drivers +# Node Drivers -If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers). +If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{}}/rancher/v2.x/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers). diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md index d1f29074b38..73dbf40a0bb 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md @@ -24,9 +24,7 @@ Use {{< product >}} to create a Kubernetes cluster in Azure. 2. Complete the **Azure Options** form. - - **Account Access** stores your account information for authenticating with Azure. - - {{< step_create-cloud-credential >}} + - **Account Access** stores your account information for authenticating with Azure. Note: As of v2.2.0, account access information is stored as a cloud credentials. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. 
To create a new cloud credential, enter **Name** and **Account Access** data, then click **Create.** - **Placement** sets the geographical region where your cluster is hosted and other location metadata. @@ -43,3 +41,10 @@ Use {{< product >}} to create a Kubernetes cluster in Azure. 7. Review your options to confirm they're correct. Then click **Create**. {{< result_create-cluster >}} + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md index 811bb6fb8fb..21f6438048c 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md @@ -19,14 +19,12 @@ Use {{< product >}} to create a Kubernetes cluster using DigitalOcean. 6. {{< step_create-cluster_node-pools >}} - 1. Click **Add Node Template**. + 1. Click **Add Node Template**. Note: As of v2.2.0, account access information is stored as a cloud credentials. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. To create a new cloud credential, enter **Name** and **Account Access** data, then click **Create.** 2. Complete the **Digital Ocean Options** form. - **Access Token** stores your DigitalOcean Personal Access Token. Refer to [DigitalOcean Instructions: How To Generate a Personal Access Token](https://www.digitalocean.com/community/tutorials/how-to-use-the-digitalocean-api-v2#how-to-generate-a-personal-access-token). - {{< step_create-cloud-credential >}} - - **Droplet Options** provision your cluster's geographical region and specifications. 4. {{< step_rancher-template >}} @@ -38,3 +36,10 @@ Use {{< product >}} to create a Kubernetes cluster using DigitalOcean. 7. Review your options to confirm they're correct. Then click **Create**. {{< result_create-cluster >}} + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. 
As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md index 5722a01b0a6..94a58722719 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md @@ -1,22 +1,82 @@ --- title: Creating an Amazon EC2 Cluster shortTitle: Amazon EC2 +description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher weight: 2210 -aliases: - - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-amazon-ec2/ --- -Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. +Use Rancher to create a Kubernetes cluster in Amazon EC2. -## Prerequisites +### Prerequisites -- AWS EC2 Access Key and Secret key that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- IAM Policy created to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our two example JSON policies below: +- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. +- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - [Example IAM Policy](#example-iam-policy) - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- IAM Policy added as Permission to the user. 
See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. + - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) +- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. +# Creating an EC2 Cluster -## Create the cluster +The steps to create a cluster differ based on your Rancher version. + +{{% tabs %}} +{{% tab "Rancher v2.2.0+" %}} + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **Amazon.** +1. In the **Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key.** +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials and information from EC2 +Complete each of the following forms using information available from the [EC2 Management Console](https://aws.amazon.com/ec2). + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. In the **Region** field, select the same region that you used when creating your cloud credentials. +1. In the **Cloud Credentials** field, select your newly created cloud credentials. +1. Click **Next: Authenticate & configure nodes.** +1. Choose an availability zone and network settings for your cluster. Click **Next: Select a Security Group.** +1. Choose the default security group or configure a security group. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#security-group-for-nodes-on-aws-ec2) to see what rules are created in the `rancher-nodes` Security Group. Then click **Next: Set Instance options.** +1. Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. + +> If you need to pass an IAM Instance Profile Name (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + +Optional: In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the docker version or a Docker registry mirror. + +### 3. 
Create a cluster with node pools using the node template + +{{< step_create-cluster_node-pools >}} + +1. From the **Clusters** page, click **Add Cluster**. + +1. Choose **Amazon EC2**. + +1. Enter a **Cluster Name**. + +1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. + +1. Click **Add Member** to add users that can access the cluster. + +1. Use the **Role** drop-down to set permissions for each user. + +1. Use **Cluster Options** to choose the version of Kubernetes, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. + +1. Click **Create**. + +{{< result_create-cluster >}} +{{% /tab %}} +{{% tab "Rancher prior to v2.2.0+" %}} 1. From the **Clusters** page, click **Add Cluster**. @@ -26,7 +86,7 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. 1. {{< step_create-cluster_member-roles >}} -1. {{< step_create-cluster_cluster-options >}}Refer to [Selecting Cloud Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider.

+1. {{< step_create-cluster_cluster-options >}}Refer to [Selecting Cloud Providers]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. 1. {{< step_create-cluster_node-pools >}} @@ -34,23 +94,28 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. 1. Complete each of the following forms using information available from the [EC2 Management Console](https://aws.amazon.com/ec2). - - **Account Access** is where you configure the region of the nodes, and the credentials (Access Key and Secret Key) used to create the machine. See [Prerequisistes](#prerequisites) how to create the Access Key and Secret Key and the needed permissions. - - {{< step_create-cloud-credential >}} - - - **Zone and Network** configures the availability zone and network settings for your cluster. - - **Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{< baseurl >}}/rancher/v2.x/en/installation/references/#amazonec2-securitygroup-nodedriver) to see what rules are created in the `rancher-nodes` Security Group. - - **Instance** configures the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. + - **Account Access** is where you configure the region of the nodes, and the credentials (Access Key and Secret Key) used to create the machine. See [Prerequisites](#prerequisites) how to create the Access Key and Secret Key and the needed permissions. + - **Zone and Network** configures the availability zone and network settings for your cluster. + - **Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#security-group-for-nodes-on-aws-ec2) to see what rules are created in the `rancher-nodes` Security Group. + - **Instance** configures the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI.

- If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - 1. {{< step_rancher-template >}} - 1. Click **Create**. - 1. **Optional:** Add additional node pools. -
+1. {{< step_rancher-template >}} +1. Click **Create**. +1. **Optional:** Add additional node pools. 1. Review your cluster settings to confirm they are correct. Then click **Create**. {{< result_create-cluster >}} +{{% /tab %}} +{{% /tabs %}} + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. ### Example IAM Policy @@ -157,3 +222,45 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. ] } ``` +### Example IAM Policy to allow encrypted EBS volumes +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Encrypt", + "kms:DescribeKey", + "kms:CreateGrant", + "ec2:DetachVolume", + "ec2:AttachVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", + "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Resource": "*" + } + ] +} +``` diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md index 517c0d0f38d..e512460253d 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md @@ -1,174 +1,47 @@ --- title: Creating a vSphere Cluster shortTitle: vSphere +description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. weight: 2225 aliases: - /rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ --- -Use {{< product >}} to create a Kubernetes cluster in vSphere. -## Introduction +By using Rancher with vSphere, you can bring cloud operations on-premises. -When creating a vSphere cluster, Rancher first provisions the specified amount of virtual machines by communicating with the vCenter API. 
Then it installs Kubernetes on top of them. A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for the data, control, and worker plane respectively. +Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. ->**Note:** ->The vSphere node driver included in Rancher currently only supports the provisioning of VMs with [RancherOS]({{< baseurl >}}/os/v1.x/en/) as the guest operating system. +A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. -## Prerequisites +# vSphere Enhancements -### vSphere API permissions +The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: -Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. If you are planning to make use of vSphere volumes for persistent storage in the cluster, there are [additional requirements]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/vsphere/) that must be met. +### Self-healing Node Pools -### Network permissions +_Available as of v2.3.0_ -You must ensure that the hosts running Rancher servers are able to establish network connections to the following network endpoints: +One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. -- vCenter server (usually port 443/TCP) -- Every ESXi host that is part of the datacenter to be used to provision virtual machines for your clusters (port 443/TCP). +> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. +### Dynamically Populated Options for Instances and Scheduling -## Provisioning a vSphere Cluster +_Available as of v2.3.3_ -The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: +Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. -1. From the **vSphere** console, go to the **Administration** page. +For the fields to be populated, your setup needs to fulfill the [prerequisites.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) -2. Go to the **Roles** tab. 
+### More Supported Operating Systems -3. Create a new role. Give it a name and select the privileges listed in the [permissions table](#annex-vsphere-permissions). +In Rancher v2.3.3+, you can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) - ![image]({{< baseurl >}}/img/rancher/rancherroles1.png) +In Rancher prior to v2.3.3, the vSphere node driver included in Rancher only supported the provisioning of VMs with [RancherOS]({{}}/os/v1.x/en/) as the guest operating system. -4. Go to the **Users and Groups** tab. +# Video Walkthrough of v2.3.3 Node Template Features -5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, as you will need it when configuring node templates in Rancher. +In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. - ![image]({{< baseurl >}}/img/rancher/rancheruser.png) - -6. Go to the **Global Permissions** tab. - -7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. - - ![image]({{< baseurl >}}/img/rancher/globalpermissionuser.png) - - ![image]({{< baseurl >}}/img/rancher/globalpermissionrole.png) - -## Creating vSphere Clusters - -### Create a vSphere Node Template - -To create a cluster, you need to create at least one vSphere [node template]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) that specifies how VMs are created in vSphere. - ->**Note:** ->Once you create a node template, it is saved, and you can re-use it whenever you create additional vSphere clusters. - -1. Log in with an admin account to the Rancher UI. - -2. From the user settings menu, select **Node Templates**. - -3. Click **Add Template** and then click on the **vSphere** icon. - -4. Under [Account Access](#account-access) enter the vCenter FQDN or IP address and the credentials for the vSphere user account (see [Prerequisites](#prerequisites)). - - {{< step_create-cloud-credential >}} - -5. Under [Instance Options](#instance-options), configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -6. **Optional:** Enter the URL pointing to a [RancherOS]({{< baseurl >}}/os/v1.x/en/) cloud-config file in the [Cloud Init](#instance-options) field. - -7. Ensure that the [OS ISO URL](#instance-options) contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). - - ![image]({{< baseurl >}}/img/rancher/vsphere-node-template-1.png) - -8. **Optional:** Provide a set of [Configuration Parameters](#instance-options) for the VMs. - -9. Under **Scheduling**, enter the name/path of the **Data Center** to create the VMs in, the name of the **VM Network** to attach to, and the name/path of the **Datastore** to store the disks in. - - ![image]({{< baseurl >}}/img/rancher/vsphere-node-template-2.png) - -10. **Optional:** Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. - -11. **Optional:** Customize the configuration of the Docker daemon on the VMs that will be created. - -10. Assign a descriptive **Name** for this template and click **Create**. - -___ - -### Create a vSphere Cluster - -After you've created a template, you can use it stand up the vSphere cluster itself. - -1. 
From the **Global** view, click **Add Cluster**. - -2. Choose **vSphere**. - -3. Enter a **Cluster Name**. - -4. {{< step_create-cluster_member-roles >}} - -5. {{< step_create-cluster_cluster-options >}} - -6. {{< step_create-cluster_node-pools >}} - - ![image]({{< baseurl >}}/img/rancher/vsphere-cluster-create-1.png) - -7. Review your configuration, then click **Create**. - -> **Note:** -> -> If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. - -{{< result_create-cluster >}} - -## Annex - Node Template Configuration Reference - -The tables below describe the configuration options available in the vSphere node template. - -### Account Access - -| Parameter | Required | Description | -|:------------------------:|:--------:|:------------------------------------------------------------:| -| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. | -| Port | * | Port to use when connecting to the server. Defaults to `443`. | -| Username | * | vCenter/ESXi user to authenticate with the server. | -| Password | * | User's password. | - -___ - -### Instance Options - -| Parameter | Required | Description | -|:------------------------:|:--------:|:------------------------------------------------------------:| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -___ - -### Scheduling Options - -| Parameter | Required | Description | -|:------------------------:|:--------:|:------------------------------------------------------------:| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name/path of folder in the datastore to create the VMs in. Must already exist. 
| - -___ - -## Annex - vSphere Permissions - -The following table lists the permissions required for the vSphere user account configured in the node templates: - -| Privilege Group | Operations | -|:----------------------|:-----------------------------------------------------------------------| -| Datastore | AllocateSpace
Browse
FileManagement (Low level file operations)
UpdateVirtualMachineFiles
UpdateVirtualMachineMetadata | -| Network | Assign | -| Resource | AssignVMToPool | -| Virtual Machine | Config (All)
GuestOperations (All)
Interact (All)
Inventory (All)
Provisioning (All) | +{{< youtube id="dPIwg6x1AlU">}} diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md new file mode 100644 index 00000000000..ecee7787ccc --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md @@ -0,0 +1,319 @@ +--- +title: Provisioning Kubernetes Clusters in vSphere +weight: 1 +--- + +This section explains how to configure Rancher with vSphere credentials, provision nodes in vSphere, and set up Kubernetes clusters on those nodes. + +# Prerequisites + +This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. + +The node templates are documented and tested with the vSphere Web Services API version 6.5. + +- [Create credentials in vSphere](#create-credentials-in-vsphere) +- [Network permissions](#network-permissions) +- [Valid ESXi License for vSphere API Access](#valid-esxi-license-for-vsphere-api-access) + +### Create Credentials in vSphere + +Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. + +Refer to this [how-to guide]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. + +### Network Permissions + +It must be ensured that the hosts running the Rancher server are able to establish the following network connections: + +- To the vSphere API on the vCenter server (usually port 443/TCP). +- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required with Rancher prior to v2.3.3 or when using the ISO creation method in later versions*). +- To port 22/TCP and 2376/TCP on the created VMs + +See [Node Networking Requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. + +### Valid ESXi License for vSphere API Access + +The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. + +# Creating Clusters in vSphere with Rancher + +This section describes how to set up vSphere credentials, node templates, and vSphere clusters using the Rancher UI. + +You will need to do the following: + +1. [Create a node template using vSphere credentials](#1-create-a-node-template-using-vsphere-credentials) +2. [Create a Kubernetes cluster using the node template](#2-create-a-kubernetes-cluster-using-the-node-template) +3. 
[Optional: Provision storage](#3-optional-provision-storage) + - [Enable the vSphere cloud provider for the cluster](#enable-the-vsphere-cloud-provider-for-the-cluster) + +### Configuration References + +For details on configuring the node template, refer to the [node template configuration reference.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) + +Rancher uses the RKE library to provision Kubernetes clusters. For details on configuring clusters in vSphere, refer to the [cluster configuration reference in the RKE documentation.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) + +Note that the vSphere cloud provider must be [enabled](#enable-the-vsphere-cloud-provider-for-the-cluster) to allow dynamic provisioning of volumes. + +# 1. Create a Node Template Using vSphere Credentials + +To create a cluster, you need to create at least one vSphere [node template]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) that specifies how VMs are created in vSphere. + +After you create a node template, it is saved, and you can re-use it whenever you create additional vSphere clusters. + +To create a node template, + +1. Log in with an administrator account to the Rancher UI. + +1. From the user settings menu, select **Node Templates.** + +1. Click **Add Template** and then click on the **vSphere** icon. + +Then, configure your template: + +- [A. Configure the vSphere credential](#a-configure-the-vsphere-credential) +- [B. Configure node scheduling](#b-configure-node-scheduling) +- [C. Configure instances and operating systems](#c-configure-instances-and-operating-systems) +- [D. Add networks](#d-add-networks) +- [E. If not already enabled, enable disk UUIDs](#e-if-not-already-enabled-enable-disk-uuids) +- [F. Optional: Configure node tags and custom attributes](#f-optional-configure-node-tags-and-custom-attributes) +- [G. Optional: Configure cloud-init](#g-optional-configure-cloud-init) +- [H. Saving the node template](#h-saving-the-node-template) + +### A. Configure the vSphere Credential + +The steps for configuring your vSphere credentials for the cluster are different depending on your version of Rancher. + +{{% tabs %}} +{{% tab "Rancher v2.2.0+" %}} + +Your account access information is in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) Cloud credentials are stored as Kubernetes secrets. + +You can use an existing cloud credential or create a new one. To create a new cloud credential, + +1. Click **Add New.** +1. In the **Name** field, enter a name for your vSphere credentials. +1. In the **vCenter or ESXi Server** field, enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. +1. Optional: In the **Port** field, configure the port of the vCenter or ESXi server. +1. In the **Username** and **Password** fields, enter your vSphere login username and password. +1. Click **Create.** + +**Result:** The node template has the credentials required to provision nodes in vSphere. + +{{% /tab %}} +{{% tab "Rancher prior to v2.2.0" %}} +In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. +{{% /tab %}} +{{% /tabs %}} + +### B. 
Configure Node Scheduling + +Choose what hypervisor the virtual machine will be scheduled to. The configuration options depend on your version of Rancher. + +{{% tabs %}} +{{% tab "Rancher v2.3.3+" %}} + +The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. + +1. In the **Data Center** field, choose the data center where the VM will be scheduled. +1. Optional: Select a **Resource Pool.** Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. +1. If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. +1. Optional: Select a folder where the VM will be placed. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. Note: The folder name should be prefaced with `vm/` in your vSphere config file. +1. Optional: Choose a specific host to create the VM on. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. +{{% /tab %}} +{{% tab "Rancher prior to v2.3.3" %}} + +In the **Scheduling** section, enter: + +- The name/path of the **Data Center** to create the VMs in +- The name of the **VM Network** to attach to +- The name/path of the **Datastore** to store the disks in + + {{< img "/img/rancher/vsphere-node-template-2.png" "image" >}} + +{{% /tab %}} +{{% /tabs %}} + +### C. Configure Instances and Operating Systems + +Depending on the Rancher version there are different options available to configure instances. + +{{% tabs %}} +{{% tab "Rancher v2.3.3+" %}} + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). + +The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). + +Choose the way that the VM will be created: + +- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. +- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list `Library templates`. +- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. +- **Install from boot2docker ISO:** Ensure that the `OS ISO URL` field contains the URL of a VMware ISO release for RancherOS (rancheros-vmware.iso). Note that this URL must be accessible from the nodes running your Rancher server installation. 
+ +{{% /tab %}} +{{% tab "Rancher prior to v2.3.3" %}} + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +Only VMs booting from RancherOS ISO are supported. + +Ensure that the [OS ISO URL](#instance-options) contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. + + ![image]({{< baseurl >}}/img/rancher/vsphere-node-template-1.png) + +{{% /tab %}} +{{% /tabs %}} + +### D. Add Networks + +_Available as of v2.3.3_ + +The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. + +### E. If Not Already Enabled, Enable Disk UUIDs + +In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. + +As of Rancher v2.0.4, disk UUIDs are enabled in vSphere node templates by default. + +If you are using Rancher prior to v2.0.4, refer to these [instructions]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#enabling-disk-uuids-with-a-node-template) for details on how to enable a UUID with a Rancher node template. + +### F. Optional: Configure Node Tags and Custom Attributes + +The way to attach metadata to the VM is different depending on your Rancher version. + +{{% tabs %}} +{{% tab "Rancher v2.3.3+" %}} + +**Optional:** Add vSphere tags and custom attributes. Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +For tags, all your vSphere tags will show up as options to select from in your node template. + +In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. + + > **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +{{% /tab %}} +{{% tab "Rancher prior to v2.3.3" %}} + +**Optional:** + + - Provide a set of configuration parameters (instance-options) for the VMs. + - Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. + - Customize the configuration of the Docker daemon on the VMs that will be created. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +{{% /tab %}} +{{% /tabs %}} + +### G. Optional: Configure cloud-init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +The scope of cloud-init support for the VMs differs depending on the Rancher version. + +{{% tabs %}} +{{% tab "Rancher v2.3.3+" %}} + +To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the **Cloud Init** field. Refer to the [cloud-init documentation](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. 
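+For example, a minimal cloud config that creates a user, authorizes an SSH key, and installs a package on first boot might look like the sketch below. The user name, SSH key, and package are placeholders, not values required by Rancher:
+
+```yaml
+#cloud-config
+users:
+  - name: deploy                                  # placeholder user name
+    groups: sudo
+    shell: /bin/bash
+    ssh_authorized_keys:
+      - ssh-ed25519 AAAA... admin@example.com     # placeholder public key
+packages:
+  - ntp
+runcmd:
+  - systemctl enable --now ntp
+```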
+ +*Note that cloud-init is not supported when using the ISO creation method.* + +{{% /tab %}} +{{% tab "Rancher prior to v2.3.3" %}} + +You may specify the URL of a RancherOS cloud-config.yaml file in the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/installation/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. + +{{% /tab %}} +{{% /tabs %}} + +### H. Saving the Node Template + +Assign a descriptive **Name** for this template and click **Create.** + +### Node Template Configuration Reference + +Refer to [this section]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) for a reference on the configuration options available for vSphere node templates. + +# 2. Create a Kubernetes Cluster Using the Node Template + +After you've created a template, you can use it to stand up the vSphere cluster itself. + +To install Kubernetes on vSphere nodes, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. This requirement applies both to pre-created [custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) and to nodes created in Rancher using the vSphere node driver. + +To create the cluster and enable the vSphere provider for the cluster, follow these steps: + +- [A. Set up the cluster name and member roles](#a-set-up-the-cluster-name-and-member-roles) +- [B. Configure Kubernetes options](#b-configure-kubernetes-options) +- [C. Add node pools to the cluster](#c-add-node-pools-to-the-cluster) +- [D. Optional: Add a self-healing node pool](#d-optional-add-a-self-healing-node-pool) +- [E. Create the cluster](#e-create-the-cluster) + +### A. Set up the Cluster Name and Member Roles + +1. Log in to the Rancher UI as an administrator. +2. Navigate to **Clusters** in the **Global** view. +3. Click **Add Cluster** and select the **vSphere** infrastructure provider. +4. Assign a **Cluster Name.** +5. Assign **Member Roles** as required. {{< step_create-cluster_member-roles >}} + +> **Note:** > > If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. + + +### B. Configure Kubernetes Options +{{}} + +### C. Add Node Pools to the Cluster +{{}} + +### D. Optional: Add a Self-Healing Node Pool + +To make a node pool self-healing, enter a number greater than zero in the **Auto Replace** column. Rancher will use the node template for the given node pool to recreate the node if it becomes inactive for that number of minutes. + +> **Note:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. + +### E. 
Create the Cluster + +Click **Create** to start provisioning the VMs and Kubernetes services. + +{{< result_create-cluster >}} + +# 3. Optional: Provision Storage + +For an example of how to provision storage in vSphere using Rancher, refer to the + [cluster administration section.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) + + In order to provision storage in vSphere, the vSphere provider must be enabled. + +### Enable the vSphere Cloud Provider for the Cluster + +1. Set **Cloud Provider** option to `Custom`. + + {{< img "/img/rancher/vsphere-node-driver-cloudprovider.png" "vsphere-node-driver-cloudprovider">}} + +1. Click on **Edit as YAML** +1. Insert the following structure to the pre-populated cluster YAML. As of Rancher v2.3+, this structure must be placed under `rancher_kubernetes_engine_config`. In versions prior to v2.3, it has to be defined as a top-level field. Note that the `name` *must* be set to `vsphere`. + + ```yaml + rancher_kubernetes_engine_config: # Required as of Rancher v2.3+ + cloud_provider: + name: vsphere + vsphereCloudProvider: + [Insert provider configuration] + ``` + + Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. + + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md new file mode 100644 index 00000000000..9c5bc71a0e2 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials/_index.md @@ -0,0 +1,41 @@ +--- +title: Creating Credentials in the vSphere Console +weight: 1 +--- + +This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. 
+ +The following table lists the permissions required for the vSphere user account: + +| Privilege Group | Operations | +|:----------------------|:-----------------------------------------------------------------------| +| Datastore | AllocateSpace
Browse
FileManagement (Low level file operations)
UpdateVirtualMachineFiles
UpdateVirtualMachineMetadata | +| Network | Assign | +| Resource | AssignVMToPool | +| Virtual Machine | Config (All)
GuestOperations (All)
Interact (All)
Inventory (All)
Provisioning (All) | + +The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: + +1. From the **vSphere** console, go to the **Administration** page. + +2. Go to the **Roles** tab. + +3. Create a new role. Give it a name and select the privileges listed in the permissions table above. + + {{< img "/img/rancher/rancherroles1.png" "image" >}} + +4. Go to the **Users and Groups** tab. + +5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. + + {{< img "/img/rancher/rancheruser.png" "image" >}} + +6. Go to the **Global Permissions** tab. + +7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. + + {{< img "/img/rancher/globalpermissionuser.png" "image" >}} + + {{< img "/img/rancher/globalpermissionrole.png" "image" >}} + +**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md new file mode 100644 index 00000000000..2388ad4e8ad --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids/_index.md @@ -0,0 +1,24 @@ +--- +title: Enabling Disk UUIDs in Node Templates +weight: 3 +--- + +As of Rancher v2.0.4, disk UUIDs are enabled in vSphere node templates by default. + +For Rancher prior to v2.0.4, we recommend configuring a vSphere node template to automatically enable disk UUIDs because they are required for Rancher to manipulate vSphere resources. + +To enable disk UUIDs for all VMs created for a cluster, + +1. Navigate to the **Node Templates** in the Rancher UI while logged in as an administrator. + +2. Add or edit an existing vSphere node template. + +3. Under **Instance Options** click on **Add Parameter**. + +4. Enter `disk.enableUUID` as key with a value of **TRUE**. + + {{< img "/img/rke/vsphere-nodedriver-enable-uuid.png" "vsphere-nodedriver-enable-uuid" >}} + +5. Click **Create** or **Save**. + +**Result:** The disk UUID is enabled in the vSphere node template. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md new file mode 100644 index 00000000000..cdc3d70e232 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/_index.md @@ -0,0 +1,93 @@ +--- +title: vSphere Node Template Configuration Reference +weight: 4 +--- + +The tables below describe the configuration options available in the vSphere node template: + +- [Account access](#account-access) +- [Instance options](#instance-options) +- [Scheduling options](#scheduling-options) + +# Account Access + +The account access parameters are different based on the Rancher version. 
+ +{{% tabs %}} +{{% tab "Rancher v2.2.0+" %}} + +| Parameter | Required | Description | +|:----------------------|:--------:|:-----| +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | + +{{% /tab %}} +{{% tab "Rancher prior to v2.2.0" %}} + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. | +| Port | * | Port to use when connecting to the server. Defaults to `443`. | +| Username | * | vCenter/ESXi user to authenticate with the server. | +| Password | * | User's password. | + +{{% /tab %}} +{{% /tabs %}} + +# Instance Options + +The options for creating and configuring an instance are different depending on your Rancher version. + +{{% tabs %}} +{{% tab "Rancher v2.3.3+" %}} + +| Parameter | Required | Description | +|:----------------|:--------:|:-----------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to the section on [configuring instances.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#c-configure-instances-and-operating-systems) | +| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | +| Networks | | Name(s) of the network to attach the VM to. | +| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + +{{% /tab %}} +{{% tab "Rancher prior to v2.3.3" %}} + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. 
Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + +{{% /tab %}} +{{% /tabs %}} + +# Scheduling Options +The options for scheduling VMs to a hypervisor are different depending on your Rancher version. +{{% tabs %}} +{{% tab "Rancher v2.3.3+" %}} + +| Parameter | Required | Description | +|:------------------------|:--------:|:-------| +| Data Center | * | Name/path of the datacenter to create VMs in. | +| Resource Pool | | Name of the resource pool to schedule the VMs in. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | +| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | +| Host | | The IP of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | + +{{% /tab %}} +{{% tab "Rancher prior to v2.3.3" %}} + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| Data Center | * | Name/path of the datacenter to create VMs in. | +| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | +| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | +| Network | * | Name of the VM network to attach VMs to. | +| Data Store | * | Datastore to store the VM disks. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | +{{% /tab %}} +{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md index 7e593486172..bd6c563029c 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md @@ -1,56 +1,84 @@ --- -title: Cluster Options +title: Cluster Configuration Reference weight: 2250 --- -As you configure a new cluster that's provisioned using [RKE]({{< baseurl >}}/rke/latest/en/), you can choose custom Kubernetes options. +As you configure a new cluster that's [provisioned using RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), you can choose custom Kubernetes options. You can configure Kubernetes options one of two ways: - [Rancher UI](#rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. -- [Config File](#config-file): Alternatively, you can create a [RKE config file]({{< baseurl >}}/rke/latest/en/config-options/) to customize any option offered by Kubernetes. +- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. 
Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. -## Rancher UI +In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the section about the [cluster config file.](#cluster-config-file) + +This section is a cluster configuration reference, covering the following topics: + +- [Rancher UI Options](#rancher-ui-options) + - [Kubernetes version](#kubernetes-version) + - [Network provider](#network-provider) + - [Kubernetes cloud providers](#kubernetes-cloud-providers) + - [Private registries](#private-registries) + - [Authorized cluster endpoint](#authorized-cluster-endpoint) +- [Advanced Options](#advanced-options) + - [NGINX Ingress](#nginx-ingress) + - [Node port range](#node-port-range) + - [Metrics server monitoring](#metrics-server-monitoring) + - [Pod security policy support](#pod-security-policy-support) + - [Docker version on nodes](#docker-version-on-nodes) + - [Docker root directory](#docker-root-directory) + - [Recurring etcd snapshots](#recurring-etcd-snapshots) +- [Cluster config file](#cluster-config-file) + - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0+) + - [Config file structure in Rancher v2.0.0-v2.2.x](#config-file-structure-in-rancher-v2-0-0-v2-2-x) + - [Default DNS provider](#default-dns-provider) +- [Rancher specific parameters](#rancher-specific-parameters) + +# Rancher UI Options When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. -From this section you can choose: +### Kubernetes Version -- The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). +The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). -- The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{< baseurl >}}/rancher/v2.x/en/faq/networking/cni-providers/). +### Network Provider - >**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{< baseurl >}}/rancher/v2.x/en/faq/networking/cni-providers/). 
- Out of the box, Rancher is compatible with the following network providers: +>**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. - - [Canal](https://github.com/projectcalico/canal) +Out of the box, Rancher is compatible with the following network providers: - In v2.0.0 - v2.0.4 and v2.0.6, this was the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). +- [Canal](https://github.com/projectcalico/canal) +- [Flannel](https://github.com/coreos/flannel#flannel) +- [Calico](https://docs.projectcalico.org/v3.11/introduction/) +- [Weave](https://github.com/weaveworks/weave) (Available as of v2.2.0) - As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). +**Notes on Canal:** + +In v2.0.0 - v2.0.4 and v2.0.6, this was the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). - >**Attention Rancher v2.0.0 - v2.0.6 Users** - > - >- In previous Rancher releases, Canal isolates project network communications with no option to disable it. If you are using any of these Rancher releases, be aware that using Canal prevents all communication between pods in different projects. - >- If you have clusters using Canal and are upgrading to v2.0.7, those clusters enable Project Network Isolation by default. If you want to disable Project Network Isolation, edit the cluster and disable the option. +As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/). +>**Attention Rancher v2.0.0 - v2.0.6 Users** +> +>- In previous Rancher releases, Canal isolates project network communications with no option to disable it. If you are using any of these Rancher releases, be aware that using Canal prevents all communication between pods in different projects. +>- If you have clusters using Canal and are upgrading to v2.0.7, those clusters enable Project Network Isolation by default. If you want to disable Project Network Isolation, edit the cluster and disable the option. - - [Flannel](https://github.com/coreos/flannel#flannel) +**Notes on Flannel:** - In v2.0.5, this was the default option, which did not prevent any network isolation between projects. +In v2.0.5, this was the default option, which did not prevent any network isolation between projects. - - [Calico](https://docs.projectcalico.org/) - - [Weave](https://github.com/weaveworks/weave) (_Available as of v2.2.0_) +**Notes on Weave:** - When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. 
If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the [Weave Network Plug-in Options]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). +When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) and the [Weave Network Plug-in Options]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). +### Kubernetes Cloud Providers -
+You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. -- If you want to configure a [Kubernetes cloud provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. - - >**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. +>**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: @@ -58,56 +86,258 @@ If you want to see all the configuration options for a cluster, please click **S _Available as of v2.2.0_ -If you are using a private registry with authentication for your Docker images, please configure the registry in this section to allow the nodes to pull images from this registry. See [Private Registries]({{< baseurl >}}/rke/latest/en/config-options/private-registries/) for more information. +The cluster-level private registry configuration is only used for provisioning clusters. + +There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.x/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. + +The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. + +- **System images** are components needed to maintain the Kubernetes cluster. +- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. 
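+As a rough sketch, a private registry with credentials could be declared in the cluster config file as shown below. The registry URL, user, and password are placeholders, and in Rancher v2.3.0+ the block is nested under `rancher_kubernetes_engine_config`:
+
+```yaml
+rancher_kubernetes_engine_config:
+  private_registries:
+    - url: registry.example.com      # placeholder registry hostname
+      user: registry-user            # placeholder credentials
+      password: registry-password
+      is_default: true               # pull system and addon images from this registry by default
+```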
+ +See the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. ### Authorized Cluster Endpoint _Available as of v2.2.0_ -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. This is enabled by default, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. It is recommended to create an FQDN pointing to a load balancer which load balances across your nodes with the `controlplane` role. If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate which will be included in the generated kubeconfig to validate the certificate chain. See the [Kubeconfig Files]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) and [API Keys]({{< baseurl >}}/v2.x/en/user-settings/api-keys/#creating-an-api-key) documentation for more information. +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. -### Advanced Cluster Options +> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.x/en/overview/architecture/#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. -#### Nginx Ingress +This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) + +# Advanced Options + +The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** + +### NGINX Ingress Option to enable or disable the [NGINX ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/). -#### Node Port Range +### Node Port Range Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. -#### Metrics Server Monitoring +### Metrics Server Monitoring Option to enable or disable [Metrics Server]({{< baseurl >}}/rke/latest/en/config-options/add-ons/metrics-server/). -#### Pod Security Policy Support +### Pod Security Policy Support Option to enable and select a default [Pod Security Policy]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. -#### Docker version on nodes +### Docker Version on Nodes Option to require [a supported Docker version]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. 
-#### Docker Root Directory +### Docker Root Directory If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. -#### Recurring etcd Snapshots +### Recurring etcd Snapshots -Option to enable or disable [recurring etcd snaphots]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). +Option to enable or disable [recurring etcd snapshots]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). -## Config File +# Cluster Config File + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. >**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{< baseurl >}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. - - To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. - To read from an existing RKE file, click **Read from a file**. ![image]({{< baseurl >}}/img/rancher/cluster-options-yaml.png) -For an example of RKE config file syntax, see the [RKE documentation]({{< baseurl >}}/rke/latest/en/example-yamls/). +The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. + +### Config File Structure in Rancher v2.3.0+ + +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. + +{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.3.0+" %}} + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. 
+# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` +{{% /accordion %}} + +### Config File Structure in Rancher v2.0.0-v2.2.x + +An example cluster config file is included below. + +{{% accordion id="prior-to-v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.0.0-v2.2.x" %}} +```yaml +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.15.3-rancher3-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 +ssh_agent_auth: false +``` +{{% /accordion %}} ### Default DNS provider @@ -119,25 +349,25 @@ The table below indicates what DNS provider is deployed by default. 
See [RKE doc | v2.2.5 and higher | v1.13.x and lower | kube-dns | | v2.2.4 and lower | any | kube-dns | -### Rancher specific parameters +# Rancher specific parameters _Available as of v2.2.0_ Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): -#### docker_root_dir +### docker_root_dir See [Docker Root Directory](#docker-root-directory). -#### enable_cluster_monitoring +### enable_cluster_monitoring Option to enable or disable [Cluster Monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/). -#### enable_network_policy +### enable_network_policy Option to enable or disable Project Network Isolation. -#### local_cluster_auth_endpoint +### local_cluster_auth_endpoint See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). @@ -149,3 +379,16 @@ local_cluster_auth_endpoint: fqdn: "FQDN" ca_certs: "BASE64_CACERT" + +### Custom Network Plug-in + +_Available as of v2.2.4_ + +You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. + +There are two ways that you can specify an add-on: + +- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) + +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/_index.md index 96088b6c645..fe04895fd71 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/_index.md @@ -1,5 +1,5 @@ --- -title: Selecting Cloud Providers +title: Setting up Cloud Providers weight: 2255 aliases: - /rancher/v2.x/en/concepts/clusters/cloud-providers/ @@ -163,6 +163,11 @@ Setting the value of the tag to `owned` tells the cluster that all resources wit **Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. + +#### Using Amazon Elastic Container Registry (ECR) + +The kubelet component has the ability to automatically obtain ECR credentials when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs to be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. 
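+For example, on a cluster running a Kubernetes version older than v1.15.0, enabling the Amazon cloud provider in the cluster config file can be as small as the following sketch (shown in Rancher v2.3.0+ syntax, nested under `rancher_kubernetes_engine_config`):
+
+```yaml
+rancher_kubernetes_engine_config:
+  cloud_provider:
+    name: aws    # credentials are picked up from the IAM instance profile attached to the nodes
+```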
+ ## Azure When using the `Azure` cloud provider, you can leverage the following capabilities: diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md new file mode 100644 index 00000000000..de3e9ba5058 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md @@ -0,0 +1,39 @@ +--- +title: Rancher Agents +weight: 2400 +--- + +There are two different agent resources deployed on Rancher managed clusters: + +- [cattle-cluster-agent](#cattle-cluster-agent) +- [cattle-node-agent](#cattle-node-agent) + +For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture]({{}}/rancher/v2.x/en/overview/architecture/) + +### cattle-cluster-agent + +The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. + +### cattle-node-agent + +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. + +> **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. + +### Scheduling rules + +_Applies to v2.3.0 and higher_ + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | +| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | + +The `cattle-cluster-agent` Deployment has preferred scheduling rules using `requiredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. 
+ +The `requiredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: + +| Weight | Expression | +| ------ | ------------------------------------------------ | +| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | +| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md index 187f4ea252e..337a4452bcc 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md @@ -1,141 +1,178 @@ --- -title: Configuring Custom Clusters for Windows (Experimental) +title: Launching Kubernetes on Windows Clusters weight: 2240 --- ->**Notes:** -> ->- Configuring Windows clusters is new and improved for Rancher v2.3.0! ->- Still using v2.1.x or v2.2.x? See the documentation for how to provision Windows clusters on [previous versions]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/). As of v2.1.10 and v2.2.4, the ability to provision Windows clusters has been removed in the 2.1.x and 2.2.x lines. +_Available as of v2.3.0_ -_Available as of v2.3.0-alpha1_ +When provisioning a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes custom cluster on your existing infrastructure. ->**Important:** -> ->Support for Windows nodes is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using Windows nodes in a production environment. +You can use a mix of Linux and Windows hosts as your cluster nodes. Windows nodes can only be used for deploying workloads, while Linux nodes are required for cluster management. -When provisioning a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) using Rancher, you can use a mix of Linux and Windows hosts as your cluster nodes. +You can only add Windows nodes to a cluster if Windows support is enabled. Windows support can be enabled for new custom clusters that use Kubernetes 1.15+ and the Flannel network provider. Windows support cannot be enabled for existing clusters. -This guide walks you through the creation of a custom cluster that includes three nodes. +> Windows clusters have more requirements than Linux clusters. For example, Windows nodes must have 50 GB of disk space. Make sure your Windows cluster fulfills all of the [requirements.](#requirements-for-windows-clusters) -* A Linux node, which serves as the Kubernetes control plane node. -* Another Linux node, which serves as a Kubernetes worker used to support Rancher Cluster agent, Metrics server, DNS and Ingress for the cluster. -* A Windows node, which is assigned the Kubernetes worker role and runs your Windows containers. +For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). 
-## Prerequisites - -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{< baseurl >}}/rancher/v2.x/en/installation/) before proceeding with this guide. - -For a summary of Kubernetes features supported in Windows, see [Using Windows Server Containers in Kubernetes](https://kubernetes.io/docs/getting-started-guides/windows/#supported-features). - -### Node Requirements - -In order to add Windows worker nodes, the node must be running Windows Server 2019 (i.e. core version 1809 or above). Any earlier versions (e.g. core version 1803 and earlier) do not properly support Kubernetes. - -Windows overlay networking requires that [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix is installed. Most cloud-hosted VMs already have this hotfix. - -### Container Requirements - -Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server 2019 core version 1809. If you have existing containers built for Windows Server 2019 core version 1803 or earlier, they must be re-built on Windows Server 2019 core version 1809. - -## Steps for Creating a Cluster with Windows Support - -To set up a custom cluster with support for Windows nodes and containers, you will need to complete the series of tasks listed below. +This guide covers the following topics: -- [1. Provision Hosts](#1-provision-hosts) -- [2. Create the Custom Cluster](#2-create-the-custom-cluster) -- [3. Add Linux Master Node](#3-add-linux-master-node) -- [4. Add Linux Worker Node](#4-add-linux-worker-node) -- [5. Add Windows Workers](#5-add-windows-workers) -- [6. Cloud-host VM Routes Configuration for Host Gateway Mode (Optional)](#6-cloud-hosted-vm-routes-configuration-for-host-gateway-mode) -- [7. Configuration for Azure Files (Optional)](#7-configuration-for-azure-files) - +- [Prerequisites](#prerequisites) +- [Requirements](#requirements-for-windows-clusters) + - [OS and Docker](#os-and-docker-requirements) + - [Nodes](#node-requirements) + - [Networking](#networking-requirements) + - [Architecture](#architecture-requirements) + - [Containers](#container-requirements) +- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) +- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) + -## 1. Provision Hosts +# Prerequisites -To begin provisioning a custom cluster with Windows support, prepare your hosts. Provision three nodes according to our [installation requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) - two Linux, one Windows. Your hosts can be: +Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{< baseurl >}}/rancher/v2.x/en/installation/) before proceeding with this guide. + +> **Note on Cloud Providers:** If you set a Kubernetes cloud provider in your cluster, some additional steps are required. 
You might want to set a cloud provider if you want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider for your cluster. + +# Requirements for Windows Clusters + +For a custom cluster, the general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). + +### OS and Docker Requirements + +In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): + +- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. +- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. + +> **Notes:** +> +> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). +> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. + +### Node Requirements + +The hosts in the cluster need to have at least: + +- 2 core CPUs +- 5 GB memory +- 50 GB disk space + +Rancher will not provision the node if the node does not meet these requirements. + +### Networking Requirements + +Rancher only supports Windows using Flannel as the network provider. + +There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. + +For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. + +For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. + +### Architecture Requirements + +The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. + +The `worker` nodes, where your workloads will be deployed, will typically be Windows nodes, but there must be at least one `worker` node running Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress-related containers.
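To make that division of work concrete, here is a minimal, hypothetical sketch of a Windows workload pinned to the Windows `worker` nodes via the standard OS node label (`beta.kubernetes.io/os` on Kubernetes 1.15); the workload name and image are placeholders, not part of Rancher:

```yaml
# Hypothetical example: keep a Windows container on Windows worker nodes by
# selecting the standard OS node label. Names and image are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: win-webserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: win-webserver
  template:
    metadata:
      labels:
        app: win-webserver
    spec:
      nodeSelector:
        beta.kubernetes.io/os: windows
      containers:
      - name: iis
        image: mcr.microsoft.com/windows/servercore/iis
        ports:
        - containerPort: 80
```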
+ +We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: + + + +| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | +| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd-nodes), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Manage the Kubernetes cluster | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | +| Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Run your Windows containers | + +### Container Requirements + +Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. + +# Tutorial: How to Create a Cluster with Windows Support + +This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) + +When you provision a custom cluster with Rancher, you will add nodes to the cluster by installing the [Rancher agent]({{}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options/) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your custom cluster. + +To set up a custom cluster with support for Windows nodes and containers, you will need to complete the tasks below. + + + +1. [Provision Hosts](#1-provision-hosts) +1. [Create the Custom Cluster](#2-create-the-custom-cluster) +1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) +1. [Optional: Configuration for Azure Files](#5-optional-configuration-for-azure-files) + + +# 1. Provision Hosts + +To begin provisioning a custom cluster with Windows support, prepare your hosts. + +Your hosts can be: - Cloud-hosted VMs - VMs from virtualization clusters - Bare-metal servers -The table below lists the [Kubernetes roles]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) you'll assign to each host. The roles will be enabled later on in the configuration process. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane. In this guide, we will be installing all three roles on this node. The second node is also a Linux worker, which is responsible for running a DNS server, Ingress controller, Metrics server and Rancher Cluster agent. The third node, a Windows worker, will run your Windows containers. 
+You will provision three nodes: -Node | Operating System | Future Cluster Role(s) ---------|------------------|------ -Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) -Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) -Node 3 | Windows (Windows Server 2019 required) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) +- One Linux node, which manages the Kubernetes control plane and stores your `etcd` +- A second Linux node, which will be another worker node +- The Windows node, which will run your Windows containers as a worker node ->**Notes:** -> ->- If you are using AWS, you should choose *Microsoft Windows Server 2019 Base with Containers* as the Amazon Machine Image (AMI). ->- If you are using GCE, you should choose *Windows Server 2019 Datacenter for Containers* as the OS image. +| Node | Operating System | +| ------ | ------------------------------------------------------------ | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | +| Node 3 | Windows (Windows Server core version 1809 or above required) | -### Requirements +If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) -- You can view the general requirements for Linux and Windows nodes in the [installation section]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). -- For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. -- For **VXLAN (Overlay)** networking, you must confirm that Windows Server 2019 has the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix installed. Most cloud-hosted VMs already have this hotfix. -- Your cluster must include at least one Linux worker node to run Rancher Cluster agent, DNS, Metrics server and Ingress related containers. -- Although we recommend the three node architecture listed in the table above, you can always add additional Linux and Windows workers to scale up your cluster for redundancy. +# 2. Create the Custom Cluster -## 2. Create the Custom Cluster +The instructions for creating a custom cluster that supports Windows nodes are very similar to the general [instructions for creating a custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster) with some Windows-specific requirements. -The instructions for creating a custom cluster that supports Windows nodes are very similar to the general [instructions for creating a custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#2-create-the-custom-cluster) with some Windows specific requirements. The entire process is documented below. +Windows support only be enabled if the cluster uses Kubernetes v1.15+ and the Flannel network provider. -1. From the main Rancher dashboard click on the **Clusters** tab and select **Add Cluster**. +1. 
From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. -1. The first section asks where the cluster is hosted. You should select **Custom**. +1. Click **From existing nodes (Custom)**. -1. Enter a name for your cluster in the **Cluster Name** text box. +1. Enter a name for your cluster in the **Cluster Name** text box. -1. {{< step_create-cluster_member-roles >}} +1. In the **Kubernetes Version** dropdown menu, select v1.15 or above. -1. {{< step_create-cluster_cluster-options >}} +1. In the **Network Provider** field, select **Flannel.** - In order to use Windows workers, you must choose the following options: - - You must select `v1.14` or above for **Kubernetes Version**. - - You must select **Flannel** as the **Network Provider**. There are two options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. - - You must select **Enable** for **Windows Support**. +1. In the **Windows Support** section, click **Enable.** -1. If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, see [Selecting Cloud Providers]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. +1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. 1. Click **Next**. +> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. +# 3. Add Nodes to the Cluster ->**Important:** If you are using *Host Gateway (L2bridge)* mode and hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. +This section describes how to register your Linux and Windows nodes to your custom cluster. -Service | Directions to disable private IP address checks --------|------------------------------------------------ -Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) -Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) -Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) +### Add Linux Master Node -## 3.
Add Linux Master Node +The first node in your cluster should be a Linux host that has both the **Control Plane** and **etcd** roles. At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. -The first node in your cluster should be a Linux host that fills both *Control Plane* and *etcd* role. Both of these two roles must be fulfilled before you can add Windows hosts to your cluster. At a minimum, the node must have 2 roles enabled, but we recommend enabling all three. The following table lists our recommended settings (we'll provide the recommended settings for nodes 2 and 3 later). +In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. -Option | Setting -------|-------- -Node Operating System | Linux -Node Roles | etcd
Control Plane
Worker (optional) +1. In the **Node Operating System** section, click **Linux**. -1. For Node Operating System select **Linux**. +1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. -1. From **Node Role**, choose at least **etcd** and **Control Plane**. +1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{< baseurl >}}/rancher/v2.x/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -1. **Optional**: Click **Show advanced options** to specify IP address(es) to use when registering the node, override the hostname of the node or to add labels to the node. - - [Rancher Agent Options]({{< baseurl >}}/rancher/v2.x/en/admin-settings/agent-options/)
- [Kubernetes Documentation: Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) - -1. Copy the command displayed on the screen to your clipboard. - - >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. +1. Copy the command displayed on the screen to your clipboard. 1. SSH into your Linux host and run the command that you copied to your clipboard. @@ -143,19 +180,19 @@ Node Roles | etcd
Control Plane
Worker (optional) {{< result_create-cluster >}} -## 4. Add Linux Worker Node +It may take a few minutes for the node to be registered in your cluster. -After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Add another Linux host, which will be used to support *Rancher cluster agent*, *Metrics server*, *DNS* and *Ingress* for your cluster. +### Add Linux Worker Node -1. Using the content menu, open the custom cluster your created in [2. Create the Custom Cluster](#2-create-the-custom-cluster). +After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. -1. From the main menu, select **Nodes**. +1. From the **Global** view, click **Clusters.** -1. Click **Edit Cluster**. +1. Go to the custom cluster that you created and click **Ellipsis (...) > Edit.** 1. Scroll down to **Node Operating System**. Choose **Linux**. -1. Select the **Worker** role. +1. In the **Customize Node Run Command** section, go to **Node Options** and select the **Worker** role. 1. Copy the command displayed on screen to your clipboard. @@ -163,19 +200,25 @@ After the initial provisioning of your custom cluster, your cluster only has a s 1. From **Rancher**, click **Save**. -**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. +**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. -## 5. Add Windows Workers +> **Note:** Taints on Linux Worker Nodes +> +> For each Linux worker node added into the cluster, the following taint will be added to the Linux worker node. Because of this taint, any workloads added to the cluster will automatically be scheduled to the Windows worker nodes. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads (a sketch is shown after the Windows worker steps below). + +> | Taint Key | Taint Value | Taint Effect | +> | -------------- | ----------- | ------------ | +> | `cattle.io/os` | `linux` | `NoSchedule` | + +### Add a Windows Worker Node You can add Windows hosts to a custom cluster by editing the cluster and choosing the **Windows** option. -1. From the main menu, select **Nodes**. +1. From the **Global** view, click **Clusters.** -1. Click **Edit Cluster**. +1. Go to the custom cluster that you created and click **Ellipsis (...) > Edit.** -1. Scroll down to **Node Operating System**. Choose **Windows**. - -1. Select the **Worker** role. +1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. 1. Copy the command displayed on screen to your clipboard. @@ -183,43 +226,28 @@ You can add Windows hosts to a custom cluster by editing the cluster and choosin 1. From Rancher, click **Save**. -1. **Optional:** Repeat these instruction if you want to add more Windows nodes to your cluster. +1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. -**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. +**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster.
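As referenced in the taints note above, here is a minimal sketch of a Linux-only workload that tolerates the `cattle.io/os=linux:NoSchedule` taint so it can run on the Linux worker node; the pod name and image are hypothetical:

```yaml
# Hypothetical example: tolerate the cattle.io/os=linux:NoSchedule taint so this
# pod can be scheduled onto the Linux worker node instead of a Windows node.
apiVersion: v1
kind: Pod
metadata:
  name: linux-helper
spec:
  nodeSelector:
    beta.kubernetes.io/os: linux
  tolerations:
  - key: cattle.io/os
    operator: Equal
    value: linux
    effect: NoSchedule
  containers:
  - name: helper
    image: busybox
    command: ["sleep", "3600"]
```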
You now have a Windows Kubernetes cluster. -## 6. Cloud-hosted VM Routes Configuration for Host Gateway Mode +### Optional Next Steps -If you are using the [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) backend of Flannel, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. -- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. - -To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: - -```bash -kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR -``` - -Then follow the instructions for each cloud provider to configure routing rules for each node: - -Service | Instructions ---------|------------- -Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). -Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). - - -## 7. Configuration for Azure Files +# Configuration for Storage Classes in Azure If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a [storage class]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/#adding-storage-classes) for the cluster. In order to have the Azure platform create the required storage resources, follow these steps: -1. [Configure the Azure cloud provider.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) +1. [Configure the Azure cloud provider.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) -1. 
Configure `kubectl` to connect to your cluster. +1. Configure `kubectl` to connect to your cluster. -1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for service account. +1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: --- apiVersion: rbac.authorization.k8s.io/v1 @@ -244,7 +272,7 @@ In order to have the Azure platform create the required storage resources, follo name: persistent-volume-binder namespace: kube-system -1. Create these in your cluster using one of the follow command. +1. Create these in your cluster using one of the follow command. ``` # kubectl create -f diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md index 2aa36f0243d..e9986f6abae 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md @@ -3,25 +3,28 @@ title: v2.1.x and v2.2.x Windows Documentation (Experimental) weight: 9100 --- ->**Note:** This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). - _Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ ->**Important:** -> ->Support for Windows nodes is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using Windows nodes in a production environment. +This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/). -When provisioning a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) using Rancher, you can use a mix of Linux and Windows hosts as your cluster nodes. +When you create a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. -This guide walks you through create of a custom cluster that includes 3 nodes: a Linux node, which serves as a Kubernetes control plane node; another Linux node, which serves as a Kubernetes worker used to support Ingress for the cluster; and a Windows node, which is assigned the Kubernetes worker role and runs your Windows containers. +You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. ->**Notes:** -> ->- For a summary of Kubernetes features supported in Windows, see [Using Windows in Kubernetes](https://kubernetes.io/docs/setup/windows/intro-windows-in-kubernetes/). ->- Windows containers must run on Windows Server 1803 nodes. Windows Server 1709 and earlier versions do not support Kubernetes properly. ->- Containers built for Windows Server 1709 or earlier do not run on Windows Server 1803. You must build containers on Windows Server 1803 to run these containers on Windows Server 1803. +>**Important:** In versions of Rancher prior to v2.3, support for Windows nodes is experimental. 
Therefore, it is not recommended to use Windows nodes for production environments if you are using Rancher prior to v2.3. +This guide walks you through create of a custom cluster that includes three nodes: +- A Linux node, which serves as a Kubernetes control plane node +- Another Linux node, which serves as a Kubernetes worker used to support Ingress for the cluster +- A Windows node, which is assigned the Kubernetes worker role and runs your Windows containers + +For a summary of Kubernetes features supported in Windows, see [Using Windows in Kubernetes](https://kubernetes.io/docs/setup/windows/intro-windows-in-kubernetes/). + +## OS and Container Requirements + +- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1809 or above. +- You must build containers on a Windows Server core version 1809 or above to run these containers on the same server version. ## Objectives for Creating Cluster with Windows Support @@ -52,7 +55,7 @@ Node | Operating System | Future Cluster Role(s) --------|------------------|------ Node 1 | Linux (Ubuntu Server 16.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) Node 2 | Linux (Ubuntu Server 16.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) (This node is used for Ingress support) -Node 3 | Windows (*Windows Server 1803 required*) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) +Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) ### Requirements @@ -100,8 +103,6 @@ Option | Setting Node Operating System | Linux Node Roles | etcd
Control Plane
Worker -![Recommended Linux Control Plane Configuration]({{< baseurl >}}/img/rancher/linux-control-plane.png) - When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 8]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md new file mode 100644 index 00000000000..ee075c394de --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md @@ -0,0 +1,37 @@ +--- +title: Networking Requirements for Host Gateway (L2bridge) +weight: 1000 +--- + +This section describes how to configure custom Windows clusters that are using *Host Gateway (L2bridge)* mode. + +### Disabling Private IP Address Checks + +If you are using *Host Gateway (L2bridge)* mode and hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. + +Service | Directions to disable private IP address checks +--------|------------------------------------------------ +Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) +Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) (By default, a VM cannot forward a packet originated by another VM) +Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) + +### Cloud-hosted VM Routes Configuration + +If you are using the [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) backend of Flannel, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. + +- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. + +- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. + +To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: + +```bash +kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR +``` + +Then follow the instructions for each cloud provider to configure routing rules for each node: + +Service | Instructions +--------|------------- +Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). 
+Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). diff --git a/content/rancher/v2.x/en/contributing/_index.md b/content/rancher/v2.x/en/contributing/_index.md index d54ff552f9c..3965c2e7783 100644 --- a/content/rancher/v2.x/en/contributing/_index.md +++ b/content/rancher/v2.x/en/contributing/_index.md @@ -5,7 +5,19 @@ aliases: - /rancher/v2.x/en/faq/contributing/ --- -### Repositories +This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. + +For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: + +- How to set up the Rancher development environment and run tests +- The typical flow of an issue through the development lifecycle +- Coding guidelines and development best practices +- Debugging and troubleshooting +- Developing the Rancher API + +On the Rancher Users Slack, the channel for developers is **#developer**. + +# Repositories All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. @@ -24,51 +36,51 @@ CLI | https://github.com/rancher/cli | This repository is the source code for th Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. -To see all libraries/projects used in Rancher, see the `vendor.conf` in the `rancher/rancher` repository. +To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. ![Rancher diagram]({{< baseurl >}}/img/rancher/ranchercomponentsdiagram.svg)
Rancher components used for provisioning/managing Kubernetes clusters. -### Building +# Building -Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository (plus additional `trash` commands, please see below for more information about using `trash`), and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. +Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -Dependencies on other libraries/projects are managed using [Trash](https://github.com/rancher/trash). See the [Trash README](https://github.com/rancher/trash/blob/master/README.md) to discover how it can be used. In short, it uses a `vendor.conf` file to specify the source repository and revision to fetch, checkout and copy to the `./vendor` directory. After updating `vendor.conf`, you can run `make trash` to update dependencies for your change. When the dependencies are updated, you can build the project again using `make` so that it will be built using the updated dependencies. - -### Bugs, Issues or Questions +# Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). +### Checklist for Filing Issues + Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. >**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. >**Important:** Please remove any sensitive data as it will be publicly viewable. -- Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: - - Hosts (What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce) - - Operating System (What operating system are you using. 
Providing specifics helps here like the output of `cat /etc/os-release` for exact OS release and `uname -r` for exact kernel used) - - Docker (What Docker version are you using, how did you install it? Most of the details of Docker can be found by supplying output of `docker version` and `docker info`) - - Environment (Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer) - - Rancher (What version of Rancher are you using, this can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host) - - Clusters (What kind of cluster did you create, how did you create it, what did you specify when you were creating it) -- Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. +- **Resources:** Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: + - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce + - **Operating System:** What operating system are you using? Providing specifics helps here like the output of `cat /etc/os-release` for exact OS release and `uname -r` for exact kernel used + - **Docker:** What Docker version are you using, how did you install it? Most of the details of Docker can be found by supplying output of `docker version` and `docker info` + - **Environment:** Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer + - **Rancher:** What version of Rancher are you using, this can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host + - **Clusters:** What kind of cluster did you create, how did you create it, what did you specify when you were creating it +- **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. -- Provide data/logs from the used resources. +- **Logs:** Provide data/logs from the used resources. - Rancher - - Single node + - Docker install ``` docker logs \ --timestamps \ $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') ``` - - High Availability (HA) install using `kubectl` + - Kubernetes install using `kubectl` - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. 
``` kubectl -n cattle-system \ @@ -76,16 +88,16 @@ Please follow this checklist when filing an issue which will helps us investigat -l app=rancher \ --timestamps=true ``` - - High Availability (HA) install using `docker` on each of the nodes in the RKE cluster + - Docker install using `docker` on each of the nodes in the RKE cluster ``` docker logs \ --timestamps \ $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') ``` - - High Availability (HA) RKE Add-On Install + - Kubernetes Install with RKE Add-On - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. ``` kubectl -n cattle-system \ @@ -99,10 +111,9 @@ Please follow this checklist when filing an issue which will helps us investigat - `/var/log/kern.log` - Docker daemon logging (these might not all exist, depending on operating system) - `/var/log/docker.log` +- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. -If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -### Docs +# Docs If you have any updates to our documentation, please make any pull request to our docs repo. diff --git a/content/rancher/v2.x/en/faq/_index.md b/content/rancher/v2.x/en/faq/_index.md index fa194605866..60f260984b5 100644 --- a/content/rancher/v2.x/en/faq/_index.md +++ b/content/rancher/v2.x/en/faq/_index.md @@ -7,121 +7,66 @@ aliases: This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. -See [Technical FAQ]({{< baseurl >}}/rancher/v2.x/en/faq/technical/), for frequently asked technical questions. +See [Technical FAQ]({{}}/rancher/v2.x/en/faq/technical/), for frequently asked technical questions. -### Kubernetes +
-#### What does it mean when you say Rancher v2.x is built on Kubernetes? - -Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. - -#### Do you plan to implement upstream Kubernetes, or continue to work on your own fork? - -We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. - -#### Does this release mean that we need to re-train our support staff in Kubernetes? - -Yes. Rancher will offer the native Kubernetes functionality via `kubectl` but will also offer our own UI dashboard to allow you to deploy Kubernetes workload without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend understanding Kubernetes. We do plan on improving our UX with subsequent releases to make Kubernetes easier to use. - -#### So, wait. Is a Rancher compose going to make a Kubernetes pod? Do we have to learn both now? We usually use the filesystem layer of files, not the UI. - -No. Unfortunately, the differences were enough such that we cannot support Rancher compose anymore in 2.x. We will be providing both a tool and guides to help with this migration. - -### Cattle - -### How does Rancher v2.x affect Cattle? - -Cattle will not supported in v2.x as Rancher has been re-architected to be based on Kubernetes. You can, however, expect majority of Cattle features you use will exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. - -#### Can I migrate existing Cattle workloads into Kubernetes? - -Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. - -### Environments & Clusters - -#### Can I still create templates for environments and clusters? - -Starting with 2.0, the concept of an environment has now been changed to a Kubernetes cluster as going forward, only the Kubernetes orchestration engine is supported. -Kubernetes Cluster Templates is on our roadmap for 2.x. Please refer to our Release Notes and documentation for all the features that we currently support. - -#### Can you still add an existing host to an environment? (i.e. not provisioned directly from Rancher) - -Yes. We still provide you with the same way of executing our Rancher agents directly on hosts. - -### Upgrading/Migrating - -#### How would the migration from v1.x to v2.x work? - -Due to the technical difficulty in transforming a Docker container into a pod running Kubernetes, upgrading will require users to "replay" those workloads from v1.x into new v2.x environments. We plan to ship with a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. - -#### Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters? - -At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. 
Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. - -#### Can I import OpenShift Kubernetes clusters into v2.x? - -Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. - -### Support - -#### What about Rancher v1.6? Are you planning some long-term support releases? - -That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it for the next 12 months at a minimum. We will extend that time period, if necessary, depending on how quickly users move to v2.1. - -#### Does Rancher v2.x support Docker Swarm and Mesos as environment types? +**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. -#### Is it possible to manage Azure Kubernetes Services with Rancher v2.x? +
+ +**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** + Yes. -#### What about Windows support? +
-With [Rancher 2.3.0 Preview 1](https://forums.rancher.com/t/rancher-release-v2-3-0-alpha3-preview-of-windows-containers/14260), we have enabled the support for Windows Server 2019 containers. The technology is in preview mode but we intend to make it GA later this year. Please refer to our documentation and Release Notes to get the latest information on this feature. +**Does Rancher support Windows?** -#### Are you planning on supporting Istio in Rancher v2.x? +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) -[Rancher 2.3.0 Preview 2](https://forums.rancher.com/t/rancher-release-v2-3-0-alpha5-preview-of-istio/14585/2) has support for Istio. Please refer to our documentation and Release Notes to get the latest information on this feature. -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along wtih any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). +
-#### Will Rancher v2.x support Hashicorp's Vault for storing secrets? +**Does Rancher support Istio?** + +As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) + +Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/) + +
+ +**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** Secrets management is on our roadmap but we haven't assigned it to a specific release yet. -#### Does Rancher v2.x support RKT containers as well? +
+ +**Does Rancher v2.x support RKT containers as well?** At this time, we only support Docker. -#### Will Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes? +
-We will provide the ability to use Calico, Canal, and Flannel, but always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) on what is officially supported. +**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes?** -#### Are you planning on supporting Traefik for existing setups? +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. + +
+ +**Are you planning on supporting Traefik for existing setups?** We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. -### General +
-#### Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x? +**Can I import OpenShift Kubernetes clusters into v2.x?** -Yes. We plan to eventually enhance this feature so you can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. +Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. -#### Are you going to integrate Longhorn? +
-Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project once v2.0 reaches GA (general availability). +**Are you going to integrate Longhorn?** -#### Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC? - -The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. - -#### Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x? - -Yes. You can do so by leveraging Kubernetes' network policies. - -#### What about the CLI? Will that work the same way with the same features? - -Yes. Definitely. - -#### If we use Kubernetes native YAML files for creating resources, should we expect that to work as expected, or do we need to use Rancher/Docker compose files to deploy infrastructure? - -Absolutely. +Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project. \ No newline at end of file diff --git a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md index eadea3ea782..08ae7cf4f70 100644 --- a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md +++ b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md @@ -1,5 +1,6 @@ --- -title: CNI Providers +title: Container Network Interface (CNI) Providers +description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you weight: 2300 --- @@ -53,11 +54,11 @@ Canal is a CNI network provider that gives you the best of Flannel and Calico. I In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) -![Canal Diagram]({{< baseurl >}}/img/rancher/canal-diagram.png) +{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} -For more information, see the [Canal GitHub Page](https://github.com/projectcalico/canal). +For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) #### Flannel @@ -67,7 +68,7 @@ Flannel is a simple and easy way to configure L3 network fabric designed for Kub Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). 
See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. ![Flannel Diagram]({{< baseurl >}}/img/rancher/flannel-diagram.png) @@ -81,7 +82,7 @@ Calico enables networking and network policy in Kubernetes clusters across the c Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. -Kubernetes workers should open TCP port `179` (BGP). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. ![Calico Diagram]({{< baseurl >}}/img/rancher/calico-diagram.svg) @@ -99,7 +100,7 @@ _Available as of v2.2.0_ Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. For more information, see the following pages: @@ -126,20 +127,20 @@ The following table summarizes the different features available for each CNI net - External Datastore: CNI network providers with this feature need an external datastore for its data. -- Encyption: This feature allows cyphered and secure network control and data planes. +- Encryption: This feature allows cyphered and secure network control and data planes. - Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. #### CNI Community Popularity -The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in July 2018. +The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. | Provider | Project | Stars | Forks | Contributors | | ---- | ---- | ---- | ---- | ---- | -| Canal | https://github.com/projectcalico/canal | 580 | 84 | 20 | -| flannel | https://github.com/coreos/flannel | 3980 | 987 | 123 | -| Calico | https://github.com/projectcalico/calico | 953 | 305 | 101 | -| Weave | https://github.com/weaveworks/weave/ | 5457 | 501 | 63 | +| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | +| flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | +| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | +| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 |
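As a rough illustration of the port requirements called out for each provider above, the following sketch opens the Canal/Flannel ports on a worker node. It assumes `firewalld` is the active host firewall; the same pattern applies to the Calico port (TCP `179`), the Weave ports (TCP/UDP `6783`, UDP `6784`), or to security-group rules on a cloud provider.

```plain
# Open the overlay and healthcheck ports used by Canal/Flannel on a worker node.
sudo firewall-cmd --permanent --add-port=8472/udp   # VXLAN overlay traffic
sudo firewall-cmd --permanent --add-port=9099/tcp   # CNI healthcheck
sudo firewall-cmd --reload
```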
### Which CNI Provider Should I Use? diff --git a/content/rancher/v2.x/en/faq/removing-rancher/_index.md b/content/rancher/v2.x/en/faq/removing-rancher/_index.md new file mode 100644 index 00000000000..01b53b46358 --- /dev/null +++ b/content/rancher/v2.x/en/faq/removing-rancher/_index.md @@ -0,0 +1,58 @@ +--- +title: Rancher is No Longer Needed +weight: 8010 +aliases: + - /rancher/v2.x/en/installation/removing-rancher/cleaning-cluster-nodes/ + - /rancher/v2.x/en/installation/removing-rancher/ + - /rancher/v2.x/en/admin-settings/removing-rancher/ + - /rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/ + - /rancher/v2.x/en/removing-rancher/ +--- + +This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. + +- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) +- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) +- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) +- [What if I don't want my imported cluster managed by Rancher?](#what-if-i-don-t-want-my-imported-cluster-managed-by-rancher) +- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) + +### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? + +If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. + +### If the Rancher server is deleted, how do I access my downstream clusters? + +The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: + +- **Imported clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. +- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. +- **RKE clusters:** To access an [RKE cluster,]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) the cluster must have the [authorized cluster endpoint]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.]({{}}/rancher/v2.x/en/overview/architecture/#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. + +### What if I don't want Rancher anymore? 
+ +If you [installed Rancher on a Kubernetes cluster,]({{}}/rancher/v2.x/en/installation/k8s-install/) remove Rancher by using the [System Tools]({{}}/rancher/v2.x/en/system-tools/) with the `remove` subcommand. + +If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. + +Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) + +### What if I don't want my imported cluster managed by Rancher? + +If an imported cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was imported into Rancher. + +To detach the cluster, + +1. From the **Global** view in Rancher, go to the **Clusters** tab. +2. Go to the imported cluster that should be detached from Rancher and click **Ellipsis (...) > Delete.** +3. Click **Delete.** + +**Result:** The imported cluster is detached from Rancher and functions normally outside of Rancher. + +### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? + +At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. + +The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) + +For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) \ No newline at end of file diff --git a/content/rancher/v2.x/en/faq/security/_index.md b/content/rancher/v2.x/en/faq/security/_index.md index 670cf73870b..733b79dbf05 100644 --- a/content/rancher/v2.x/en/faq/security/_index.md +++ b/content/rancher/v2.x/en/faq/security/_index.md @@ -4,10 +4,12 @@ weight: 8007 --- -### Is there a Hardening Guide? +**Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security]({{< baseurl >}}/rancher/v2.x/en/security/) section. -### What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked? +
+ +**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{< baseurl >}}/rancher/v2.x/en/security/) section. diff --git a/content/rancher/v2.x/en/faq/technical/_index.md b/content/rancher/v2.x/en/faq/technical/_index.md index da0b2063fbc..e901475ca57 100644 --- a/content/rancher/v2.x/en/faq/technical/_index.md +++ b/content/rancher/v2.x/en/faq/technical/_index.md @@ -3,50 +3,50 @@ title: Technical weight: 8006 --- -### How can I reset the admin password? +### How can I reset the administrator password? -Single node install: +Docker Install: ``` $ docker exec -ti reset-password -New password for default admin user (user-xxxxx): +New password for default administrator (user-xxxxx): ``` -High Availability install (Helm): +Kubernetes install (Helm): ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password -New password for default admin user (user-xxxxx): +New password for default administrator (user-xxxxx): ``` -High Availability install (RKE add-on): +Kubernetes install (RKE add-on): ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml $ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- reset-password -New password for default admin user (user-xxxxx): +New password for default administrator (user-xxxxx): ``` ### I deleted/deactivated the last admin, how can I fix it? -Single node install: +Docker Install: ``` $ docker exec -ti ensure-default-admin -New default admin user (user-xxxxx) -New password for default admin user (user-xxxxx): +New default administrator (user-xxxxx) +New password for default administrator (user-xxxxx): ``` -High Availability install (Helm): +Kubernetes install (Helm): ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml $ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin -New password for default admin user (user-xxxxx): +New password for default administrator (user-xxxxx): ``` -High Availability install (RKE add-on): +Kubernetes install (RKE add-on): ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml $ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- ensure-default-admin @@ -56,7 +56,7 @@ New password for default admin user (user-xxxxx): ### How can I enable debug logging? -* Single node install +* Docker Install * Enable ``` $ docker exec -ti loglevel --set debug @@ -70,7 +70,7 @@ $ docker exec -ti loglevel --set info OK ``` -* High Availability install (Helm) +* Kubernetes install (Helm) * Enable ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml @@ -90,7 +90,7 @@ OK OK ``` -* High Availability install (RKE add-on) +* Kubernetes install (RKE add-on) * Enable ``` $ KUBECONFIG=./kube_config_rancher-cluster.yml @@ -120,8 +120,8 @@ The Layer-4 Load Balancer is created as `type: LoadBalancer`. 
In Kubernetes, thi ### Where is the state of Rancher stored? -- Single node install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. -- High Availability install: in the etcd of the RKE cluster created to run Rancher. +- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. +- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. ### How are the supported Docker versions determined? diff --git a/content/rancher/v2.x/en/faq/telemetry/_index.md b/content/rancher/v2.x/en/faq/telemetry/_index.md index c8b04b9e05d..6ab582667e1 100644 --- a/content/rancher/v2.x/en/faq/telemetry/_index.md +++ b/content/rancher/v2.x/en/faq/telemetry/_index.md @@ -29,4 +29,4 @@ If Telemetry is not enabled, the process that collects the data is not running, ### How do I turn it on or off? -After initial setup, an admin user can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. +After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md b/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md new file mode 100644 index 00000000000..e0aa7ff6a3c --- /dev/null +++ b/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md @@ -0,0 +1,104 @@ +--- +title: Questions about Upgrading to Rancher v2.x +weight: 1 +--- + +This page contains frequently asked questions about the changes between Rancher v1.x and v2.x, and how to upgrade from Rancher v1.x to v2.x. + +# Kubernetes + +**What does it mean when you say Rancher v2.x is built on Kubernetes?** + +Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. + +
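Because every feature is expressed as a CRD, Rancher's types can be inspected with ordinary Kubernetes tooling. A minimal sketch, assuming `kubectl` is pointed at the cluster where Rancher is installed (the exact CRD names vary by Rancher version):

```plain
# List the CustomResourceDefinitions that Rancher registers; most of them
# live in the management.cattle.io API group.
kubectl get crds | grep cattle.io

# Rancher-managed objects can then be queried like any other resource, e.g.:
kubectl get clusters.management.cattle.io
```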
+ +**Do you plan to implement upstream Kubernetes, or continue to work on your own fork?** + +We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. + +
+ +**Does this release mean that we need to re-train our support staff in Kubernetes?** + +Yes. Rancher will offer the native Kubernetes functionality via `kubectl`, but it will also offer our own UI dashboard that lets you deploy Kubernetes workloads without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend learning it. We plan on improving our UX in subsequent releases to make Kubernetes easier to use. +
+ +**Is a Rancher Compose file going to make a Kubernetes pod? Do we have to learn both now? We usually use the filesystem layer of files, not the UI.** + +No. Unfortunately, the differences are significant enough that we cannot support Rancher Compose anymore in v2.x. We will be providing both a tool and guides to help with this migration. +
+ +**If we use Kubernetes native YAML files for creating resources, should we expect that to work as expected, or do we need to use Rancher/Docker compose files to deploy infrastructure?** + +Yes. Native Kubernetes YAML files work as expected in v2.x; you do not need Rancher or Docker Compose files to deploy infrastructure. + +# Cattle + +**How does Rancher v2.x affect Cattle?** + +Cattle will not be supported in v2.x, as Rancher has been re-architected to be based on Kubernetes. You can, however, expect the majority of the Cattle features you use to exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. +
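Purely as an illustration of the kind of translation involved (not the output of the planned migration tool), a simple Compose service and a roughly equivalent Kubernetes Deployment might look like this; the names and image are placeholders:

```yaml
# A v1.x Compose service such as:
#   web:
#     image: nginx:1.17
#     ports:
#       - "80:80"
# maps roughly to the following Kubernetes Deployment in v2.x:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: nginx:1.17
        ports:
        - containerPort: 80
```

The resulting YAML can be applied with `kubectl apply -f` or deployed through the Rancher UI.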
+ +**Can I migrate existing Cattle workloads into Kubernetes?** + +Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. + +# Feature Changes + +**Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x?** + +Yes. You can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. + +
+ +**Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC?** + +The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. + +
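As a sketch of the underlying Kubernetes RBAC primitives that Rancher roles build on (illustrative only, not one of Rancher's built-in role definitions; the namespace and user name are placeholders), a namespace-scoped read-only role could look like this:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: dev
  name: pod-reader
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: dev
  name: read-pods
subjects:
- kind: User
  name: jane                      # placeholder user
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
```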
+ +**Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x?** + +Yes. You can do so by leveraging Kubernetes' network policies. + +
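For example, a minimal NetworkPolicy that only allows front-end pods to reach a back-end workload could look like the sketch below. The labels and port are hypothetical, and the cluster's CNI provider must support network policies (for example Canal, Calico, or Weave):

```yaml
# Allow ingress to pods labeled app=backend only from pods labeled app=frontend.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: backend-allow-frontend
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend
    ports:
    - protocol: TCP
      port: 8080
```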
+ +**What about the CLI? Will that work the same way with the same features?** + +Yes, definitely. + +# Environments & Clusters + +**Can I still create templates for environments and clusters?** + +Starting with v2.0, the concept of an environment has been replaced by the Kubernetes cluster, because going forward, only the Kubernetes orchestration engine is supported. + +Kubernetes RKE Templates are on our roadmap for v2.x. Please refer to our Release Notes and documentation for all the features that we currently support. +
+ +**Can you still add an existing host to an environment? (i.e. a host not provisioned directly from Rancher)** + +Yes. You can still register existing hosts by running our Rancher agents directly on them. + +# Upgrading/Migrating + +**How would the migration from v1.x to v2.x work?** + +Due to the technical difficulty of transforming a Docker container into a pod running on Kubernetes, upgrading requires users to "replay" their workloads from v1.x into new v2.x environments. We plan to ship a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. +
+ +**Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters?** + +At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. + +# Support + +**Are you planning some long-term support releases for Rancher v1.6?** + +That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it. New releases of the v1.6 stream are announced in the [Rancher forums.](https://forums.rancher.com/c/announcements) The Rancher wiki contains the [v1.6 release notes.](https://github.com/rancher/rancher/wiki/Rancher-1.6) \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/_index.md b/content/rancher/v2.x/en/installation/_index.md index 55431c72e4f..234d9457590 100644 --- a/content/rancher/v2.x/en/installation/_index.md +++ b/content/rancher/v2.x/en/installation/_index.md @@ -1,25 +1,68 @@ --- -title: Installation +title: Installing Rancher +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation weight: 50 --- -This section contains instructions for installing Rancher in development and production environments. -### Installation Options +This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. -- [Single Node Installation]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/) +### Terminology - Install Rancher on a single Linux host. Single node installs are recommended for development and test environments, as setup is simple and the server doesn't have to be readily available for a user base—only the developer or tester. +In this section, -- [High Availability Installation]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) +**The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. - This install scenario creates a new Kubernetes cluster dedicated to running Rancher Server in a high-availability (HA) configuration, which runs Rancher Server on multiple hosts so that it's always accessible provided that one of your cluster nodes is running. We recommend high-availability installs in production environments, where your user base requires 24/7 access to your applications. +**RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. When you create a cluster in the Rancher UI, it calls RKE as a library to provision Rancher-launched Kubernetes clusters. -### Reference +### Overview of Installation Options -- [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) +If you use Rancher to deploy Kubernetes clusters, it is important to ensure that the Rancher server doesn't fail, because if it goes down, you could lose access to the Kubernetes clusters that are managed by Rancher. For that reason, we recommend that for a production-grade architecture, you should set up a Kubernetes cluster with RKE, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. - A reference of hardware and software requirements for the server(s) hosting Rancher. 
+For testing or demonstration purposes, you can install Rancher in single Docker container. In this installation, you can use Rancher to set up Kubernetes clusters out-of-the-box. -- [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) +Our [instructions for installing Rancher on Kubernetes]({{}}/rancher/v2.x/en/installation/k8s-install) describe how to first use RKE to create and manage a cluster, then install Rancher onto that cluster. For this type of architecture, you will need to deploy three nodes - typically virtual machines - in the infrastructure provider of your choice. You will also need to configure a load balancer to direct front-end traffic to the three nodes. When the nodes are running and fulfill the [node requirements,]({{}}/rancher/v2.x/en/installation/requirements) you can use RKE to deploy Kubernetes onto them, then use Helm to deploy Rancher onto Kubernetes. - List of required ports you must open to operate Rancher. +For a longer discussion of Rancher architecture, refer to the [architecture overview,]({{}}/rancher/v2.x/en/overview/architecture) [recommendations for production-grade architecture,]({{}}/rancher/v2.x/en/overview/architecture-recommendations) or our [best practices guide.]({{}}/rancher/v2.x/en/best-practices/deployment-types) + +Rancher can be installed on these main architectures: + +- **High-availability Kubernetes Install:** We recommend using [Helm,]({{}}/rancher/v2.x/en/overview/concepts/#about-helm) a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. We recommend using three nodes in the cluster because increased availability is achieved by running Rancher on multiple nodes. +- **Single-node Kubernetes Install:** Another option is to install Rancher with Helm on a Kubernetes cluster, but to only use a single node in the cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. +- **Docker Install:** For test and demonstration purposes, Rancher can be installed with Docker on a single node. This installation works out-of-the-box, but there is no migration path from a Docker installation to a high-availability installation on a Kubernetes cluster. Therefore, you may want to use a Kubernetes installation from the start. + +The single-node Kubernetes install is achieved by describing only one node in the `cluster.yml` when provisioning the Kubernetes cluster with RKE. The single node should have all three roles: `etcd`, `controlplane`, and `worker`. Then Rancher can be installed with Helm on the cluster in the same way that it would be installed on any other cluster. 
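To make that concrete, a single-node `cluster.yml` might look like the following sketch; the address, user, and SSH key path are placeholders for your own environment:

```yaml
# cluster.yml for a single-node Kubernetes install with RKE.
# The one node carries all three roles, so etcd, the control plane,
# and workloads (including Rancher itself) all run on it.
nodes:
- address: 203.0.113.10                  # reachable IP or DNS name of the node
  user: rancher                          # a user that can run docker commands
  role: [ "controlplane", "etcd", "worker" ]
  ssh_key_path: ~/.ssh/id_rsa
```

After `rke up --config ./cluster.yml` completes, Rancher is installed on the resulting cluster with Helm in the same way as on a multi-node cluster.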
+ +There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: + +| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | +| ---------------------------------- | ------------------------------ | ---------- | +| With direct access to the Internet | [Docs]({{}}/rancher/v2.x/en/installation/k8s-install/) | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) | +| Behind an HTTP proxy | These [docs,]({{}}/rancher/v2.x/en/installation/k8s-install/) plus this [configuration]({{}}/rancher/v2.x/en/installation/options/chart-options/#http-proxy) | These [docs,]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node) plus this [configuration]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node/proxy/) | +| In an air gap environment | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) | [Docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) | + +### Prerequisites +Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +### Architecture Tip + +For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. + +For more architecture recommendations, refer to [this page.]({{}}/rancher/v2.x/en/overview/architecture-recommendations) + +### More Options for Installations on a Kubernetes Cluster + +Refer to the [Helm chart options]({{}}/rancher/v2.x/en/installation/options/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: + +- With [API auditing to record all transactions]({{}}/rancher/v2.x/en/installation/options/chart-options/#api-audit-log) +- With [TLS termination on a load balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination) +- With a [custom Ingress]({{}}/rancher/v2.x/en/installation/options/chart-options/#customizing-your-ingress) + +In the Rancher installation instructions, we recommend using RKE (Rancher Kubernetes Engine) to set up a Kubernetes cluster before installing Rancher on the cluster. RKE has many configuration options for customizing the Kubernetes cluster to suit your specific environment. Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. 
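As a rough sketch of how these chart options are passed when installing Rancher (Helm 3 syntax; the repository name, hostname, and option values are placeholders, and the available options depend on your Rancher version):

```plain
kubectl create namespace cattle-system
helm install rancher rancher-stable/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set ingress.tls.source=secret
```

Further options, such as enabling the API audit log or configuring external TLS termination, are passed the same way with additional `--set` flags as described in the chart options documentation linked above.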
+ +### More Options for Installations with Docker + +Refer to the [Docker installation docs]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) for details other configurations including: + +- With [API auditing to record all transactions]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#api-audit-log) +- With an [external load balancer]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb/) +- With a [persistent data store]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#persistent-data) diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/_index.md deleted file mode 100644 index 6cc0ee32a10..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Air Gap: High Availability Install" -weight: 290 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/ ---- - -## Prerequisites - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). - -The following CLI tools are required for this install. Make sure these tools are installed on your workstation and available in your `$PATH`. - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -* [rke]({{< baseurl >}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -* [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. - ->**Note:** If you install Rancher in an HA configuration in an air gap environment, you cannot transition to a single-node setup during future upgrades. - -## Installation Outline - -- [1. Create Nodes and Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/) -- [2. Collect and Publish Image Sources]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/) -- [3. Install Kubernetes with RKE]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/install-kube/) -- [4. Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/) -- [5. Configure Rancher for the Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/) -- [6. Configure Rancher System Charts]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/) - -### [Next: Create Nodes and Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/) diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/_index.md deleted file mode 100644 index 9dc14874cd3..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "5. 
Configure Rancher for the Private Registry" -weight: 500 -aliases: - ---- - -Rancher needs to be configured to use the private registry in order to provision any [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or [Rancher tools]({{< baseurl >}}/rancher/v2.x/en/tools/). - ->**Note:** If you want to configure Rancher to use your private registry when starting the rancher/rancher container, you can use the environment variable `CATTLE_SYSTEM_DEFAULT_REGISTRY`. - -1. Log into Rancher and configure the default admin password. - -1. Go into the **Settings** view. - - ![Settings]({{< baseurl >}}/img/rancher/airgap/settings.png) - -1. Look for the setting called `system-default-registry` and choose **Edit**. - - ![Edit]({{< baseurl >}}/img/rancher/airgap/edit-system-default-registry.png) - -1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. - - ![Save]({{< baseurl >}}/img/rancher/airgap/enter-system-default-registry.png) - -### [Next: Configure Rancher System Charts]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/) diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md deleted file mode 100644 index 50c2ddd93d1..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "6. Configure Rancher System Charts" -weight: 600 -aliases: ---- - -# A. Prepare System Charts - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. - -Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. - -# B. Configure System Charts - -Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. - -### Configuring the Registry from the Rancher UI - -In the catalog management page in the Rancher UI, follow these steps: - -1. Go to the **Global** view. - -1. Click **Tools > Catalogs.** - -1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **Ellipsis (...) > Edit.** - -1. In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. - -1. Click **Save.** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -### Configuring the Registry in Rancher's API View - -1. Log into Rancher. - -1. Open `https:///v3/catalogs/system-library` in your browser. - - ![Open]({{< baseurl >}}/img/rancher/airgap/system-charts-setting.png) - -1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - - ![Update]({{< baseurl >}}/img/rancher/airgap/system-charts-update.png) - -1. Click **Show Request** - -1. 
Click **Send Request** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/install-kube/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/install-kube/_index.md deleted file mode 100644 index cbd37612477..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/install-kube/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "3. Install Kubernetes with RKE" -weight: 300 -aliases: - ---- - -## A. Create an RKE Config File - -From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which is a configuration for the cluster you're deploying Rancher to. - -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts) you created. - ->**Tip:** For more details on the options available, see the RKE [Config Options]({{< baseurl >}}/rke/latest/en/config-options/). - -
RKE Options
- -| Option | Required | Description | -| ------------------ | -------- | -------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gap network. | -| `user` | ✓ | A user that can run docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: -- address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: [ "controlplane", "etcd", "worker" ] - ssh_key_path: /home/user/.ssh/id_rsa -- address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: [ "controlplane", "etcd", "worker" ] - ssh_key_path: /home/user/.ssh/id_rsa -- address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: [ "controlplane", "etcd", "worker" ] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: -- url: # private registry url - user: rancher - password: "*********" - is_default: true -``` - -## B. Run RKE - -After configuring `rancher-cluster.yml`, open Terminal and change directories to the RKE binary. Then enter the command below to stand up your high availability cluster. - -``` -rke up --config ./rancher-cluster.yml -``` - -## C. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{< baseurl >}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -### [Next: Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher) diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/_index.md deleted file mode 100644 index 1c060e09aef..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/_index.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: 4. Install Rancher -weight: 400 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/install-rancher/ ---- - -## A. Add the Helm Chart Repository - - -From a system that has access to the internet, render the installs and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, initialize `helm` locally on a system that has internet access. - - ```plain - helm init -c - ``` - -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - ->Want additional options? Need help troubleshooting? See [High Availability Install: Advanced Options]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/#advanced-configurations). - - -## B. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -For HA air gap configurations, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -|-----|-----|-----|-----| -| [Rancher Generated Self-Signed Certificates](#self-signed) | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** | yes | -| [Certificates from Files](#secret) | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s) | no | - -## C. Install Rancher - -Based on the choice your made in [B. Choose your SSL Configuration](#b-optional-install-cert-manager), complete one of the procedures below. - -{{% accordion id="self-signed" label="Option A: Default Self-Signed Certificate" %}} -By default, Rancher generates a CA and uses cert manger to issue the certificate for access to the Rancher server interface. - -1. From a system connected to the internet, fetch the latest cert-manager chart available from the [official Helm chart repository](https://github.com/helm/charts/tree/master/stable). - - ```plain - helm fetch stable/cert-manager --version 0.5.2 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - ```plain - helm template ./cert-manager-v0.5.2.tgz --output-dir . \ - --name cert-manager --namespace kube-system \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - ``` - -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry.). - - - ```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher - ``` - -{{% /accordion %}} - -{{% accordion id="secret" label="Option B: Certificates for Files (Kubernetes Secret)" %}} - -1. Create Kubernetes secrets from your own certificates for Rancher to use. - - > **Note:** The common name for the cert will need to match the `hostname` option or the ingress controller will fail to provision the site for Rancher. - -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - - - > **Note:** If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret` - - ``` - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret -``` - -1. See [Adding TLS Secrets]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. -{{% /accordion %}} - -## D. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. 
- -If you are using self-signed certificates, install cert-manager: - -```plain -kubectl -n kube-system apply -R -f ./cert-manager -``` - -Install rancher: - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` - -### [Next: Configure Rancher for the Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/) diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/_index.md deleted file mode 100644 index 68edfac7547..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: "2. Prepare Private Registry" -weight: 200 -aliases: - - /rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/ ---- - -## A. Collect Images - -Start by collecting all the images needed to install Rancher in an air gap environment. You'll collect images from your chosen Rancher release, RKE, and (if you're using a self-signed TLS certificate) Cert-Manager. - -1. Using a computer with internet access, browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - - ![Choose Release Version]({{< baseurl >}}/img/rancher/choose-release-version.png) - -2. From the release's **Assets** section (pictured above), download the following three files, which are required to install Rancher in an air gap environment: - - - | Release File | Description | - | --- | --- | - | `rancher-images.txt` | This file contains a list of all files needed to install Rancher. - | `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | - | `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - - -1. Make `rancher-save-images.sh` an executable. - - ``` - chmod +x rancher-save-images.sh - ``` - - -1. From the directory that contains the RKE binary, add RKE's images to `rancher-images.txt`, which is a list of all the files needed to install Rancher. - - ``` - rke config --system-images >> ./rancher-images.txt - ``` -1. **Default Rancher Generated Self-Signed Certificate Users Only:** If you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://github.com/helm/charts/tree/master/stable/cert-manager) image to `rancher-images.txt` as well. You may skip this step if you are using you using your own certificates. - - 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details. - - ```plain - helm fetch stable/cert-manager --version 0.5.2 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - - 2. Sort and unique the images list to remove any overlap between the sources. - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images. 
- - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - - **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -## B. Publish Images - - -Using a computer with access to the internet and your private registry, move the images from `rancher-images.txt` to your private registry using the image scripts. - ->**Note:** Image publication may require up to 20GB of empty disk space. - -1. Log into your private registry if required. - - ```plain - docker login - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry. - - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` - -### [Next: Install Kubernetes with RKE]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/install-kube/) diff --git a/content/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/_index.md b/content/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/_index.md deleted file mode 100644 index 1048763f0d8..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "1. Create Nodes and Load Balancer" -weight: 100 -aliases: ---- -Provision three air gapped Linux hosts according to our requirements below to launch Rancher in an HA configuration. - -These hosts should be disconnected from the internet, but should have connectivity with your private registry. - -### Host Requirements - -View hardware and software requirements for each of your cluster nodes in [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
HA Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
- -![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) - -### Load Balancer - -RKE, the installer that provisions your air gapped cluster, will configure an Ingress controller pod on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. - -Configure a load balancer as a basic Layer 4 TCP forwarder. The exact configuration will vary depending on your environment. - ->**Important:** ->Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -**Load Balancer Configuration Samples:** - -- [NGINX]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/nginx) -- [Amazon NLB]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb) - -### [Next: Collect and Publish Image Sources]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/) diff --git a/content/rancher/v2.x/en/installation/air-gap-single-node/_index.md b/content/rancher/v2.x/en/installation/air-gap-single-node/_index.md deleted file mode 100644 index ee1286bad76..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-single-node/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Air Gap: Single Node Install" -weight: 280 ---- - -## Prerequisites - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machine. If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). - ->**Note:** If you install Rancher on a single node in an air gap environment, you cannot transition to a HA configuration during future upgrades. - -## Installation Outline - -- [1. Provision Linux Host]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/provision-host/) -- [2. Prepare Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/) -- [3. Choose an SSL Option and Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/) -- [4. Configure Rancher for Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/) -- [5. Configure Rancher System Charts]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/) - -### [Next: Provision Linux Host]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/provision-host/) diff --git a/content/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/_index.md b/content/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/_index.md deleted file mode 100644 index cbb98837736..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "4. 
Configure Rancher for the Private Registry" -weight: 400 -aliases: ---- - -Rancher needs to be configured to use the private registry in order to provision any [Rancher launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or [Rancher tools]({{< baseurl >}}/rancher/v2.x/en/tools/). - -1. Log into Rancher and configure the default admin password. - -1. Go into the **Settings** view. - - ![Settings]({{< baseurl >}}/img/rancher/airgap/settings.png) - -1. Look for the setting called `system-default-registry` and choose **Edit**. - - ![Edit]({{< baseurl >}}/img/rancher/airgap/edit-system-default-registry.png) - -1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. - - ![Save]({{< baseurl >}}/img/rancher/airgap/enter-system-default-registry.png) - ->**Note:** If you want to configure the setting when starting the rancher/rancher container, you can use the environment variable `CATTLE_SYSTEM_DEFAULT_REGISTRY`. - -### [Next: Configure Rancher System Charts]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/) diff --git a/content/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md b/content/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md deleted file mode 100644 index d322108daf5..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "5. Configure Rancher System Charts" -weight: 500 -aliases: ---- - -# A. Prepare System Charts - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. - -Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. - -# B. Configure System Charts - -Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. - -### Configuring the Registry from the Rancher UI - -In the catalog management page in the Rancher UI, follow these steps: - -1. Go to the **Global** view. - -1. Click **Tools > Catalogs.** - -1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **Ellipsis (...) > Edit.** - -1. In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. - -1. Click **Save.** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -### Configuring the Registry in Rancher's API View - -1. Log into Rancher. - -1. Open `https:///v3/catalogs/system-library` in your browser. - - ![Open]({{< baseurl >}}/img/rancher/airgap/system-charts-setting.png) - -1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - - ![Update]({{< baseurl >}}/img/rancher/airgap/system-charts-update.png) - -1. Click **Show Request** - -1. 
Click **Send Request** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/_index.md deleted file mode 100644 index bf50909cc44..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "3. Choose an SSL Option and Install Rancher" -weight: 300 -aliases: ---- - -For development and testing in air gap environments, we recommend installing Rancher by running a single Docker container. In this installation scenario, you'll deploy Rancher to your air gap host using an image pulled from your private registry. - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - ->**Do you want to...** -> ->- Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{< baseurl >}}/rancher/v2.x/en/admin-settings/custom-ca-root-certificate/). ->- Record all transactions with the Rancher API? See [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#api-audit-log). - - -Choose from the following options: - - -{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. Replace `` with your private registry URL and port. Replace `` with release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/) that you want to install. - - - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - /rancher/rancher: - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - ->**Prerequisites:** ->From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> ->- The certificate files must be in [PEM format]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#pem). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#cert-order). - -After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -When entering the command, use the table below to replace each placeholder. - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. 
-`` | The path to the certificate authority's private key. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/) that you want to install. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - /rancher/rancher: -``` - - -{{% /accordion %}} -{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} - -In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - ->**Prerequisite:** The certificate files must be in [PEM format]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#pem). - -After obtaining your certificate, run the Docker command below, replacing each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -When entering the command, use the table below to replace each placeholder. - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | Your private registry URL and port. Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. -`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/) that you want to install. - - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - /rancher/rancher: --no-cacerts -``` - -{{% /accordion %}} - -### [Next: Configure Rancher for the Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/) diff --git a/content/rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/_index.md b/content/rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/_index.md deleted file mode 100644 index f7b74daa966..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "2. Prepare Private Registry" -weight: 200 -aliases: ---- - -## A. Collect Image Sources - -Using a computer with internet access, browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher 2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -![Choose Release Version]({{< baseurl >}}/img/rancher/choose-release-version.png) - -From the release's **Assets** section, download the following three files, which are required to install Rancher in an air gap environment: - - -| Release File | Description | -| --- | --- | -| `rancher-images.txt` | This file contains a list of all files needed to install Rancher. -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - - -## B. 
Publish Images - -After collecting the release files, publish the images from `rancher-images.txt` to your private registry using the image scripts. - ->**Note:** Image publication may require up to 20GB of empty disk space. - -1. From a system with internet access, use the `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images. - - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - -1. Copy `rancher-load-images.sh`, `rancher-images.txt` and `rancher-images.tar.gz` files to the [Linux host]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/provision-host) that you've provisioned by completing the substeps below. - - 1. Log into your registry if required. - - ```plain - docker login - ``` - - 1. Use `rancher-load-images.sh` to extract, tag and push the images to your private registry. - - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` - -### [Next: Choose an SSL Option and Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/) \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/air-gap-single-node/provision-host/_index.md b/content/rancher/v2.x/en/installation/air-gap-single-node/provision-host/_index.md deleted file mode 100644 index 567cf6d0074..00000000000 --- a/content/rancher/v2.x/en/installation/air-gap-single-node/provision-host/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "1. Provision Linux Host" -weight: 100 -aliases: ---- - -Provision a single, air gapped Linux host according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements) to launch your {{< product >}} Server. - -This host should be disconnected from the internet, but should have connectivity with your private registry. - -### [Next: Prepare Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/) diff --git a/content/rancher/v2.x/en/installation/ha/_index.md b/content/rancher/v2.x/en/installation/ha/_index.md deleted file mode 100644 index 63cef81af17..00000000000 --- a/content/rancher/v2.x/en/installation/ha/_index.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: High Availability (HA) Install -weight: 275 ---- - -For production environments, we recommend installing Rancher in a high-availability configuration so that your user base can always access Rancher Server. When installed in a Kubernetes cluster, Rancher will integrate with the cluster's etcd database and take advantage of Kubernetes scheduling for high-availability. - -This procedure walks you through setting up a 3-node cluster with RKE and installing the Rancher chart with the Helm package manager. - -> **Important:** It is not supported, nor generally a good idea, to run Rancher on top of hosted Kubernetes solutions such as Amazon's EKS, or Google's GKE. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. It is strongly recommended to use hosted infrastructure such as EC2 or GCE instead. - -> **Important:** For the best performance, we recommend this Kubernetes cluster to be dedicated only to run Rancher. After the Kubernetes cluster to run Rancher is setup, you can [create or import clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. 
- -## Recommended Architecture - -* DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
HA Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) -HA Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers - -## Required Tools - -The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -* [rke]({{< baseurl >}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -* [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. - -> **Important:** Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. - -## Installation Outline - -- [Create Nodes and Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/) -- [Install Kubernetes with RKE]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/) -- [Initialize Helm (tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) -- [Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/) - -## Additional Install Options - -* [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) - -## Previous Methods - -[RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/) - -> ##### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). -> ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md deleted file mode 100644 index 1c9845e9cdc..00000000000 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: RKE Add-On Install -weight: 276 ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). -> ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
- - -* [High Availability Installation with External Load Balancer (TCP/Layer 4)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb) -* [High Availability Installation with External Load Balancer (HTTPS/Layer 7)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb) -* [HTTP Proxy Configuration for a High Availability Installation]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/proxy/) -* [Troubleshooting RKE Add-on Installs]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/) diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md deleted file mode 100644 index 0a99effcaba..00000000000 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: NGINX Configuration -weight: 277 -aliases: -- /rancher/v2.x/en/installation/ha-server-install-external-lb/nginx/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). -> ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. - -For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -## Create NGINX Configuration - -After installing NGINX, you need to create the NGINX config file, `/etc/nginx/conf.d/rancher.conf`, with the IP addresses for your Linux nodes, chosen FQDN and location of the certificate file and certificate key file. - ->**Note:** The example configuration below does not include all available Nginx options and may not be suitable for your production environment. For full configuration documentation, see [NGINX Load Balancing - HTTP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -1. Copy and paste the code sample below into your favorite text editor. Save it as `/etc/nginx/conf.d/rancher.conf`. - - **Example NGINX config:** - ``` - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } - ``` - -2. 
In `/etc/nginx/conf.d/rancher.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your Linux hosts. -3. In `/etc/nginx/conf.d/rancher.conf`, replace `FQDN` with the FQDN you chose for your Rancher installation. -4. In `/etc/nginx/conf.d/rancher.conf`, replace `/certs/fullchain.pem` with the path to your certificate. If there are intermediates required for you certificate, they should be included in this file. -5. In `/etc/nginx/conf.d/rancher.conf`, replace `/certs/privkey.pem` with the path to your certificate key. - - - -## Run NGINX - -* Reload or restart NGINX - - ```` - # Reload NGINX - nginx -s reload - - # Restart NGINX - # Depending on your Linux distribution - service nginx restart - systemctl restart nginx - ```` - -## Browse to Rancher UI - -You should now be to able to browse to `https://FQDN`. diff --git a/content/rancher/v2.x/en/installation/how-ha-works/_index.md b/content/rancher/v2.x/en/installation/how-ha-works/_index.md new file mode 100644 index 00000000000..a1061895611 --- /dev/null +++ b/content/rancher/v2.x/en/installation/how-ha-works/_index.md @@ -0,0 +1,25 @@ +--- +title: About High-availability Installations +weight: 2 +--- + +We recommend using [Helm,]({{}}/rancher/v2.x/en/overview/architecture/concepts/#about-helm) a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. + +In a typical installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. + +Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. + +The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. + +For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture) + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
+![High-availability Kubernetes Installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) +Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/content/rancher/v2.x/en/installation/k8s-install/_index.md b/content/rancher/v2.x/en/installation/k8s-install/_index.md new file mode 100644 index 00000000000..36d6949e90d --- /dev/null +++ b/content/rancher/v2.x/en/installation/k8s-install/_index.md @@ -0,0 +1,65 @@ +--- +title: Installing Rancher on a Kubernetes Cluster +weight: 3 +description: For production environments, install Rancher in a high-availability configuration. Read the guide for setting up a 3-node cluster and still install Rancher using a Helm chart. +aliases: + - /rancher/v2.x/en/installation/ha/ +--- + +For production environments, we recommend installing Rancher in a high-availability configuration so that your user base can always access Rancher Server. When installed in a Kubernetes cluster, Rancher will integrate with the cluster's etcd database and take advantage of Kubernetes scheduling for high-availability. + +This section describes how to first use RKE to create and manage a cluster, then install Rancher onto that cluster. For this type of architecture, you will need to deploy three VMs in the infrastructure provider of your choice. You will also need to configure a load balancer to direct front-end traffic to the three VMs. When the VMs are running and fulfill the [node requirements,]({{}}/rancher/v2.x/en/installation/requirements) you can use RKE to deploy Kubernetes onto them, then use the Helm package manager to deploy Rancher onto Kubernetes. + +### Optional: Installing Rancher on a Single-node Kubernetes Cluster + +If you only have one node, but you want to use the Rancher server in production in the future, it is better to install Rancher on a single-node Kubernetes cluster than to install it with Docker. + +One option is to install Rancher with Helm on a Kubernetes cluster, but to only use a single node in the cluster. In this case, the Rancher server does not have high availability, which is important for running Rancher in production. However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. + +The single-node Kubernetes install can be achieved by describing only one node in the `cluster.yml` when provisioning the Kubernetes cluster with RKE. The single node would have all three roles: `etcd`, `controlplane`, and `worker`. Then Rancher would be installed with Helm on the cluster in the same way that it would be installed on any other cluster. + +### Important Notes on Architecture + +The Rancher management server can only be run on an RKE-managed Kubernetes cluster. Use of Rancher on hosted Kubernetes or other providers is not supported. + +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. 
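For the single-node option described above, the `cluster.yml` used with RKE lists only one node that carries all three roles. The sketch below is illustrative only; the address, user, and SSH key path are placeholders that must be replaced with values for your own host.

```yaml
# Minimal single-node cluster.yml sketch (illustrative placeholders)
nodes:
  - address: 203.0.113.10        # public IP or DNS name of your single node
    user: ubuntu                 # a user that can run docker commands
    role: [controlplane, worker, etcd]
    ssh_key_path: ~/.ssh/id_rsa  # optional; defaults to ~/.ssh/id_rsa
```

Rancher is then installed with Helm on this cluster in the same way that it would be installed on a three-node cluster.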
+ +We recommend the following architecture and configurations for the load balancer and Ingress controllers: + +- DNS for Rancher should resolve to a Layer 4 load balancer (TCP) +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +For more information on how a Kubernetes Installation works, refer to [this page.]({{}}/rancher/v2.x/en/installation/how-ha-works) + +For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.x/en/overview/architecture) + +## Required CLI Tools + +The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + +## Installation Outline + +- [Create Nodes and Load Balancer]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/) +- [Install Kubernetes with RKE]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/) +- [Install Rancher]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/) + +## Additional Install Options + +- [Migrating from a high-availability Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) +- [Installing Rancher with Helm 2:]({{}}/rancher/v2.x/en/installation/options/helm2) This section provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +## Previous Methods + +[RKE add-on install]({{}}/rancher/v2.x/en/installation/options/rke-add-on/) + +> **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +> Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +> +> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/_index.md b/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/_index.md new file mode 100644 index 00000000000..97c3e200657 --- /dev/null +++ b/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/_index.md @@ -0,0 +1,32 @@ +--- +title: '1. Create Nodes and Load Balancer' +weight: 185 +aliases: + - /rancher/v2.x/en/installation/ha/create-nodes-lb +--- + +Use your infrastructure provider of choice to provision three nodes and a load balancer endpoint for your RKE install. + +> **Note:** These nodes must be in the same region/datacenter. You may place these servers in separate availability zones. 
+ +### Requirements for OS, Docker, Hardware, and Networking + +Make sure that your nodes fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +View the OS requirements for RKE at [RKE Requirements.]({{}}/rke/latest/en/os/) + +### Load Balancer + +RKE will configure an Ingress controller pod, on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. + +Configure a load balancer as a basic Layer 4 TCP forwarder. The exact configuration will vary depending on your environment. + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +#### How-to Guides + +- For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx/) +- For an example showing how to setup an Amazon NLB load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb/) + +### [Next: Install Kubernetes with RKE]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/) diff --git a/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx/_index.md new file mode 100644 index 00000000000..49a77c9010e --- /dev/null +++ b/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx/_index.md @@ -0,0 +1,86 @@ +--- +title: Setting up an NGINX Load Balancer +weight: 270 +aliases: + - /rancher/v2.x/en/installation/ha/create-nodes-lb/nginx +--- + +NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. + +> **Note:** +> In this configuration, the load balancer is positioned in front of your nodes. The load balancer can be any host capable of running NGINX. +> +> One caveat: do not use one of your Rancher nodes as the load balancer. + +## Install NGINX + +Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation on how to install and enable the NGINX `stream` module on your operating system. + +## Create NGINX Configuration + +After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses for your nodes. + +1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. + +2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/). + + > **Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. + +
Example NGINX config
+ ``` + worker_processes 4; + worker_rlimit_nofile 40000; + + events { + worker_connections 8192; + } + + stream { + upstream rancher_servers_http { + least_conn; + server :80 max_fails=3 fail_timeout=5s; + server :80 max_fails=3 fail_timeout=5s; + server :80 max_fails=3 fail_timeout=5s; + } + server { + listen 80; + proxy_pass rancher_servers_http; + } + + upstream rancher_servers_https { + least_conn; + server :443 max_fails=3 fail_timeout=5s; + server :443 max_fails=3 fail_timeout=5s; + server :443 max_fails=3 fail_timeout=5s; + } + server { + listen 443; + proxy_pass rancher_servers_https; + } + + } + + ``` + + ``` + +3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. + +4. Load the updates to your NGINX configuration by running the following command: + + ``` + # nginx -s reload + ``` + +## Option - Run NGINX as Docker container + +Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/nginx.conf:/etc/nginx/nginx.conf \ + nginx:1.14 +``` diff --git a/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb/_index.md new file mode 100644 index 00000000000..29aca8a2e39 --- /dev/null +++ b/content/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb/_index.md @@ -0,0 +1,154 @@ +--- +title: Setting up an Amazon NLB Load Balancer +weight: 277 +aliases: + - /rancher/v2.x/en/installation/ha/create-nodes-lb/nlb +--- + +This how-to guide describes how to set up a load balancer in Amazon's EC2 service that will direct traffic to multiple instances on EC2. + +> **Note:** Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ELB or ALB. + +Configuring an Amazon NLB is a multistage process: + +1. [Create Target Groups](#1-create-target-groups) +2. [Register Targets](#2-register-targets) +3. [Create Your NLB](#3-create-your-nlb) +4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) + +> **Prerequisite:** These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these two nodes. + +# 1. Create Target Groups + +Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX ingress controller on the nodes will make sure that port 80 gets redirected to port 443. + +1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. +1. 
Click **Create target group** to create the first target group, regarding TCP port 443. + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. + +| Option | Setting | +| ----------------------------------- | ----------------- | +| Target Group Name | `rancher-tcp-443` | +| Protocol | `TCP` | +| Port | `443` | +| Target type | `instance` | +| VPC | Choose your VPC | +| Protocol
(Health Check) | `HTTP` | +| Path
(Health Check) | `/healthz` | +| Port (Advanced health check) | `override`,`80` | +| Healthy threshold (Advanced health) | `3` | +| Unhealthy threshold (Advanced) | `3` | +| Timeout (Advanced) | `6 seconds` | +| Interval (Advanced) | `10 second` | +| Success codes | `200-399` | + + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. + +| Option | Setting | +| ----------------------------------- | ---------------- | +| Target Group Name | `rancher-tcp-80` | +| Protocol | `TCP` | +| Port | `80` | +| Target type | `instance` | +| VPC | Choose your VPC | +| Protocol
(Health Check) | `HTTP` | +| Path
(Health Check) | `/healthz` | +| Port (Advanced health check) | `traffic port` | +| Healthy threshold (Advanced health) | `3` | +| Unhealthy threshold (Advanced) | `3` | +| Timeout (Advanced) | `6 seconds` | +| Interval (Advanced) | `10 second` | +| Success codes | `200-399` | + +# 2. Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +
+**Screenshot: Add targets to target group TCP port 443**
+ +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} + +
+**Screenshot: Added targets to target group TCP port 443**
+ +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +# 3. Create Your NLB + +Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. + +Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. Then complete each form. + +- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) +- [Step 2: Configure Routing](#step-2-configure-routing) +- [Step 3: Register Targets](#step-3-register-targets) +- [Step 4: Review](#step-4-review) + +### Step 1: Configure Load Balancer + +Set the following fields in the form: + +- **Name:** `rancher` +- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. +- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. +- **Availability Zones:** Select Your **VPC** and **Availability Zones**. + +### Step 2: Configure Routing + +1. From the **Target Group** drop-down, choose **Existing target group**. +1. From the **Name** drop-down, choose `rancher-tcp-443`. +1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +### Step 3: Register Targets + +Since you registered your targets earlier, all you have to do is click **Next: Review**. + +### Step 4: Review + +Look over the load balancer details and click **Create** when you're satisfied. + +After AWS creates the NLB, click **Close**. + +# 4. Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to...** + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. diff --git a/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md new file mode 100644 index 00000000000..ea0fcda275f --- /dev/null +++ b/content/rancher/v2.x/en/installation/k8s-install/helm-rancher/_index.md @@ -0,0 +1,214 @@ +--- +title: 3. Install Rancher on the Kubernetes Cluster +description: Rancher installation is managed using the Helm Kubernetes package manager. Use Helm to install the prerequisites and charts to install Rancher +weight: 200 +aliases: + - /rancher/v2.x/en/installation/ha/helm-rancher +--- + +Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. 
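For example, a chart template can reference values that you supply at install time with `--set` or a values file. The fragment below is purely illustrative and is not taken from the Rancher chart itself:

```yaml
# Illustrative Helm template fragment (not from the Rancher chart)
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config
data:
  # Rendered at install time, e.g. with `--set hostname=rancher.my.org`
  hostname: {{ .Values.hostname | quote }}
```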
+ +With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. + +For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). + +To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.x/en/installation/options/server-tags) + +To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) + +> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on Kubernetes with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +### Install Helm + +Helm requires a simple CLI tool to be installed. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. + +### Add the Helm Chart Repository + +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + +{{< release-channel >}} + +``` +helm repo add rancher- https://releases.rancher.com/server-charts/ +``` + +### Create a Namespace for Rancher + +We'll need to define a namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: + +``` +kubectl create namespace cattle-system +``` + +### Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +There are three recommended options for the source of the certificate. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------ | -------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------- | +| Rancher Generated Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** | [yes](#optional-install-cert-manager) | +| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | Use Let's Encrypt to issue a certificate | [yes](#optional-install-cert-manager) | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s) | no | + +### Optional: Install cert-manager + +Rancher relies on [cert-manager](https://github.com/jetstack/cert-manager) to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. + +`cert-manager` is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). + +{{% accordion id="cert-manager" label="Click to Expand" %}} + +> **Important:** +> Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. + +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). + +These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). + +``` +# Install the CustomResourceDefinition resources separately +kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + +> **Important:** +> If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false flag to your kubectl apply command above else you will receive a validation error relating to the x-kubernetes-preserve-unknown-fields field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +# Create the namespace for cert-manager +kubectl create namespace cert-manager + +# Add the Jetstack Helm repository +helm repo add jetstack https://charts.jetstack.io + +# Update your local Helm chart repository cache +helm repo update + +# Install the cert-manager Helm chart +helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v0.12.0 +``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +{{% /accordion %}} + +### Install Rancher with Helm and Your Chosen Certificate Option + +{{% tabs %}} +{{% tab "Rancher-generated Certificates" %}} + +> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. + +The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. + +- Set the `hostname` to the DNS name you pointed at your load balancer. 
+ +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +{{% /tab %}} +{{% tab "Let's Encrypt" %}} + +> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. + +This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. This configuration uses HTTP validation (`HTTP-01`) so the load balancer must have a public DNS record and be accessible from the internet. + +- Set `hostname` to the public DNS record, set `ingress.tls.source` to `letsEncrypt` and `letsEncrypt.email` to the email address used for communication about your certificate (for example, expiry notices) + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=letsEncrypt \ + --set letsEncrypt.email=me@example.org +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +{{% /tab %}} +{{% tab "Certificates from Files" %}} +Create Kubernetes secrets from your own certificates for Rancher to use. + +> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.x/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) + +- Set `hostname` and set `ingress.tls.source` to `secret`. +- If you are using a Private CA signed certificate , add `--set privateCA=true` to the command shown below. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret +``` + +Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. + +After adding the secrets, check if Rancher was rolled out successfully: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: + +``` +kubectl -n cattle-system get deploy rancher +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +rancher 3 3 3 3 3m +``` + +It should show the same count for `DESIRED` and `AVAILABLE`. 
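For reference, the "Adding TLS Secrets" step mentioned above can be sketched as follows. This sketch assumes the chart's default secret names (`tls-rancher-ingress` and, for a private CA, `tls-ca`) and that your certificate, key, and CA file are saved locally as `tls.crt`, `tls.key`, and `cacerts.pem`:

```
# Publish the server certificate and key as the secret the ingress controller uses
kubectl -n cattle-system create secret tls tls-rancher-ingress \
  --cert=tls.crt \
  --key=tls.key

# Only needed if you installed with --set privateCA=true: publish the CA certificate
kubectl -n cattle-system create secret generic tls-ca \
  --from-file=cacerts.pem=./cacerts.pem
```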
+{{% /tab %}} +{{% /tabs %}} + +### Advanced Configurations + +The Rancher chart configuration has many options for customizing the install to suit your specific environment. Here are some common advanced scenarios. + +- [HTTP Proxy]({{}}/rancher/v2.x/en/installation/options/chart-options/#http-proxy) +- [Private Docker Image Registry]({{}}/rancher/v2.x/en/installation/options/chart-options/#private-registry-and-air-gap-installs) +- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination) + +See the [Chart Options]({{}}/rancher/v2.x/en/installation/options/chart-options/) for the full list of options. + +### Save your options + +Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. + +### Finishing Up + +That's it you should have a functional Rancher server. Point a browser at the hostname you picked and you should be greeted by the colorful login page. + +Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) Page diff --git a/content/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/_index.md b/content/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/_index.md new file mode 100644 index 00000000000..5ba0ca0240d --- /dev/null +++ b/content/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/_index.md @@ -0,0 +1,144 @@ +--- +title: 2. Set up a Kubernetes Cluster +description: Learn how to use Rancher Kubernetes Engine (RKE) to install Kubernetes with a high availability etcd configuration. +weight: 190 +aliases: + - /rancher/v2.x/en/installation/ha/kubernetes-rke/ +--- + +This section describes how to install a Kubernetes cluster on your three nodes according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.x/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. We recommend using RKE to install Kubernetes on this cluster. Hosted Kubernetes providers such as EKS should not be used. + +For systems without direct internet access, refer to [Air Gap: Kubernetes install.]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) + +> **Single-node Installation Tip:** +> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. +> +> To set up a single-node cluster, configure only one node in the `cluster.yml` when provisioning the cluster with RKE. The single node should have all three roles: `etcd`, `controlplane`, and `worker`. Then Rancher can be installed with Helm on the cluster in the same way that it would be installed on any other cluster. + +### Create the `rancher-cluster.yml` File + +Using the sample below, create the `rancher-cluster.yml` file. Replace the IP Addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. + +If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. 
+ +```yaml +nodes: + - address: 165.227.114.63 + internal_address: 172.16.22.12 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.116.167 + internal_address: 172.16.32.37 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.127.226 + internal_address: 172.16.42.73 + user: ubuntu + role: [controlplane, worker, etcd] + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +# Required for external TLS termination with +# ingress-nginx v0.22+ +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + +#### Common RKE Nodes Options + +| Option | Required | Description | +| ------------------ | -------- | -------------------------------------------------------------------------------------- | +| `address` | yes | The public DNS or IP address | +| `user` | yes | A user that can run docker commands | +| `role` | yes | List of Kubernetes roles assigned to the node | +| `internal_address` | no | The private DNS or IP address for internal cluster traffic | +| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | + +#### Advanced Configurations + +RKE has many configuration options for customizing the install to suit your specific environment. + +Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. + +For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide]({{}}/rancher/v2.x/en/installation/options/etcd/). + +### Run RKE + +``` +rke up --config ./rancher-cluster.yml +``` + +When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. + +### Testing Your Cluster + +RKE should have created a file `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. + +> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. + +You can copy this file to `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`. + +``` +export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml +``` + +Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state. + +``` +kubectl get nodes + +NAME STATUS ROLES AGE VERSION +165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 +``` + +### Check the Health of Your Cluster Pods + +Check that all the required pods and containers are healthy are ready to continue. + +- Pods are in `Running` or `Completed` state. +- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` +- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
+ +``` +kubectl get pods --all-namespaces + +NAMESPACE NAME READY STATUS RESTARTS AGE +ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s +kube-system canal-jp4hz 3/3 Running 0 30s +kube-system canal-z2hg8 3/3 Running 0 30s +kube-system canal-z6kpw 3/3 Running 0 30s +kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s +kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s +kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s +kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s +kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s +kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s +kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s +``` + +### Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._
+
+### Issues or errors?
+
+See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) page.
+
+### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/)
diff --git a/content/rancher/v2.x/en/installation/options/_index.md b/content/rancher/v2.x/en/installation/options/_index.md
index 15fb2960c37..be369e77a78 100644
--- a/content/rancher/v2.x/en/installation/options/_index.md
+++ b/content/rancher/v2.x/en/installation/options/_index.md
@@ -1,13 +1,14 @@
 ---
-title: Advanced Options
-weight: 350
+title: Resources, References, and Advanced Options
+weight: 5
 ---
 
 When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options:
 
-| Advanced Option | Available as of |
-| --- | ---|
-| [Custom CA Certificate]({{< baseurl >}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/) | v2.0.0 |
-| [API Audit Log]({{< baseurl >}}/rancher/v2.x/en/installation/options/api-audit-log/) | v2.0.0 |
-| [TLS Settings]({{< baseurl >}}/rancher/v2.x/en/installation/options/tls-settings/) | v2.1.7 |
-| [etcd configuration]({{< baseurl >}}/rancher/v2.x/en/installation/options/etcd/) | v2.2.0 |
+| Advanced Option | Available as of |
+| ----------------------------------------------------------------------------------------------------------------------- | --------------- |
+| [Custom CA Certificate]({{}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/) | v2.0.0 |
+| [API Audit Log]({{}}/rancher/v2.x/en/installation/options/api-audit-log/) | v2.0.0 |
+| [TLS Settings]({{}}/rancher/v2.x/en/installation/options/tls-settings/) | v2.1.7 |
+| [etcd configuration]({{}}/rancher/v2.x/en/installation/options/etcd/) | v2.2.0 |
+| [Local System Charts for Air Gap Installations]({{}}/rancher/v2.x/en/installation/options/local-system-charts) | v2.3.0 |
diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/_index.md
new file mode 100644
index 00000000000..1212425742a
--- /dev/null
+++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/_index.md
@@ -0,0 +1,43 @@
+---
+title: Installing Rancher in an Air Gapped Environment with Helm 2
+weight: 2
+aliases:
+  - /rancher/v2.x/en/installation/air-gap-installation/
+  - /rancher/v2.x/en/installation/air-gap-high-availability/
+  - /rancher/v2.x/en/installation/air-gap-single-node/
+---
+
+> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3.
+>
+> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2.
+>
+> This section provides a copy of the older instructions for installing Rancher on a Kubernetes cluster using Helm 2 in an air gap environment, and it is intended to be used if upgrading to Helm 3 is not feasible.
+
+This section is about installations of Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy.
+
+Throughout the installation instructions, there will be _tabs_ for either a high availability Kubernetes installation or a single-node Docker installation.
+ +### Air Gapped Kubernetes Installations + +This section covers how to install Rancher on a Kubernetes cluster in an air gapped environment. + +A Kubernetes installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +### Air Gapped Docker Installations + +These instructions also cover how to install Rancher on a single node in an air gapped environment. + +The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. + +Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +# Installation Outline + +- [1. Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) +- [2. Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) +- [3. Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) +- [4. Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) + +### [Next: Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md new file mode 100644 index 00000000000..619c44dcc14 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/install-rancher/_index.md @@ -0,0 +1,321 @@ +--- +title: 4. Install Rancher +weight: 400 +aliases: + - /rancher/v2.x/en/installation/air-gap-installation/install-rancher/ + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/ + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/ + - /rancher/v2.x/en/installation/air-gap-single-node/install-rancher + - /rancher/v2.x/en/installation/air-gap/install-rancher +--- + +This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + +{{% tabs %}} +{{% tab "Kubernetes Install (Recommended)" %}} + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes Installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. 
The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher in five parts: + +- [A. Add the Helm Chart Repository](#a-add-the-helm-chart-repository) +- [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration) +- [C. Render the Rancher Helm Template](#c-render-the-rancher-helm-template) +- [D. Install Rancher](#d-install-rancher) +- [E. For Rancher versions prior to v2.3.0, Configure System Charts](#e-for-rancher-versions-prior-to-v2-3-0-configure-system-charts) + +### A. Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + ```plain + helm init -c + ``` + +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. +```plain +helm fetch rancher-/rancher +``` + +> Want additional options? Need help troubleshooting? See [Kubernetes Install: Advanced Options]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#advanced-configurations). + +### B. Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no |
+
+### C. Render the Rancher Helm Template
+
+When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations.
+
+| Chart Option            | Chart Value                      | Description |
+| ----------------------- | -------------------------------- | ---- |
+| `certmanager.version`   | ""                               | Configure the proper Rancher TLS issuer depending on the running cert-manager version. |
+| `systemDefaultRegistry` | ``                               | Configure Rancher server to always pull from your private registry when provisioning clusters. |
+| `useBundledSystemChart` | `true`                           | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ |
+
+Based on the choice you made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below.
+
+{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}}
+
+By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface.
+
+> **Note:**
+> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/).
+
+1. From a system connected to the internet, add the cert-manager repo to Helm.
+   ```plain
+   helm repo add jetstack https://charts.jetstack.io
+   helm repo update
+   ```
+
+1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager).
+
+   ```plain
+   helm fetch jetstack/cert-manager --version v0.12.0
+   ```
+
+1. Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files.
+   ```plain
+   helm template ./cert-manager-v0.12.0.tgz --output-dir . \
+   --name cert-manager --namespace cert-manager \
+   --set image.repository=/quay.io/jetstack/cert-manager-controller \
+   --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \
+   --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector
+   ```
+
+1. Download the required CRD file for cert-manager.
+   ```plain
+   curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml
+   ```
+1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools.
+
+
+   Placeholder | Description
+   ------------|-------------
+   `` | The version number of the output tarball.
+   `` | The DNS name you pointed at your load balancer.
+   `` | The DNS name for your private registry.
+   `` | The cert-manager version running on the Kubernetes cluster.
+
+   ```plain
+   helm template ./rancher-.tgz --output-dir . \
+   --name rancher \
+   --namespace cattle-system \
+   --set hostname= \
+   --set certmanager.version= \
+   --set rancherImage=/rancher/rancher \
+   --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher
+   --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts
+```
+
+{{% /accordion %}}
+
+{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}}
+
+Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher.
+
+Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools.
+
+If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`.
+
+| Placeholder                      | Description                                      |
+| -------------------------------- | ------------------------------------------------ |
+| ``                               | The version number of the output tarball.        |
+| ``                               | The DNS name you pointed at your load balancer.  |
+| ``                               | The DNS name for your private registry.          |
+```plain
+ helm template ./rancher-.tgz --output-dir . \
+ --name rancher \
+ --namespace cattle-system \
+ --set hostname= \
+ --set rancherImage=/rancher/rancher \
+ --set ingress.tls.source=secret \
+ --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher
+ --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts
+```
+
+Then refer to [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them.
+
+{{% /accordion %}}
+
+### D. Install Rancher
+
+Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation.
+
+Use `kubectl` to create namespaces and apply the rendered manifests.
+
+If you chose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager.
+
+{{% accordion id="install-cert-manager" label="Self-Signed Certificate Installs - Install Cert-manager" %}}
+
+If you are using self-signed certificates, install cert-manager:
+
+1. Create the namespace for cert-manager.
+```plain
+kubectl create namespace cert-manager
+```
+
+1. Create the cert-manager CustomResourceDefinitions (CRDs).
+```plain
+kubectl apply -f cert-manager/cert-manager-crd.yaml
+```
+
+> **Important:**
+> If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your kubectl apply command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager's CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation.
+
+1. Launch cert-manager.
+```plain
+kubectl apply -R -f ./cert-manager
+```
+
+{{% /accordion %}}
+
+Install Rancher:
+
+```plain
+kubectl create namespace cattle-system
+kubectl -n cattle-system apply -R -f ./rancher
+```
+
+**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete.
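+
+If you want to confirm that Rancher came up before moving on, you can watch the rollout with `kubectl`. This is a minimal sketch, assuming the chart defaults of a deployment named `rancher` in the `cattle-system` namespace:
+
+```plain
+# Wait for the Rancher deployment to finish rolling out
+kubectl -n cattle-system rollout status deploy/rancher
+
+# All rancher pods should eventually report Running and READY
+kubectl -n cattle-system get pods
+```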
+ +### E. For Rancher versions prior to v2.3.0, Configure System Charts + +If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). + +### Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/options/chart-options/) +- [Adding TLS secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) + +{{% /tab %}} +{{% tab "Docker Install" %}} + +The Docker installation is for Rancher users that are wanting to **test** out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. **Important: If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | + +> **Do you want to...** +> +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/options/chart-options/#additional-trusted-cas). +> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#api-audit-log). + +- For Rancher prior to v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. 
For details, refer to the documentation on [setting up the system charts for Rancher prior to v2.3.0.]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0) + +Choose from the following options: + +{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +{{% /accordion %}} +{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#cert-order). + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. 
| + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +{{% /accordion %}} +{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} + +In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). + +After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | + +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +{{% /accordion %}} + +If you are installing Rancher v2.3.0+, the installation is complete. + +If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/launch-kubernetes/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/launch-kubernetes/_index.md new file mode 100644 index 00000000000..a231b04df6f --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/launch-kubernetes/_index.md @@ -0,0 +1,80 @@ +--- +title: '3. Install Kubernetes with RKE (Kubernetes Installs Only)' +weight: 300 +aliases: + - /rancher/v2.x/en/installation/air-gap-high-availability/install-kube +--- + +This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment. 
+
+Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE]({{}}/rke/latest/en/installation/) and create an RKE config file.
+
+- [A. Create an RKE Config File](#a-create-an-rke-config-file)
+- [B. Run RKE](#b-run-rke)
+- [C. Save Your Files](#c-save-your-files)
+
+### A. Create an RKE Config File
+
+From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which is a configuration for the cluster you're deploying Rancher to.
+
+Replace values in the code sample below with the help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts) you created.
+
+> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/).
+
+
RKE Options
+ +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gap network. | +| `user` | ✓ | A user that can run docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### B. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### C. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._
+
+### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher)
diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md
new file mode 100644
index 00000000000..6a286a8656a
--- /dev/null
+++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/populate-private-registry/_index.md
@@ -0,0 +1,280 @@
+---
+title: '2. Collect and Publish Images to your Private Registry'
+weight: 200
+aliases:
+  - /rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/
+  - /rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/
+  - /rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/
+  - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/
+  - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/
+---
+
+> **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use.
+>
+> **Note:** Populating the private registry with images is the same process for HA and Docker installations; the differences in this section are based on whether or not you are planning to provision a Windows cluster.
+
+By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.x/en/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gap installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images.
+
+This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry.
+
+By default, we provide the steps of how to populate your private registry assuming you are provisioning Linux only clusters, but if you plan on provisioning any [Windows clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed for a Windows cluster.
+
+{{% tabs %}}
+{{% tab "Linux Only Clusters" %}}
+
+For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry.
+
+A. Find the required assets for your Rancher version
+B. Collect all the required images
+C. Save the images to your workstation
+D. Populate the private registry
+
+### Prerequisites
+
+These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space.
+
+### A. Find the required assets for your Rancher version
+
+1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments.
+
+2. From the release's **Assets** section (pictured above), download the following files, which are required to install Rancher in an air gap environment:
+
+| Release File | Description |
+| ---------------- | -------------- |
+| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and use Rancher tools. |
+| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. |
+| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. |
+
+### B. Collect all the required images (For Kubernetes Installs using Rancher Generated Self-Signed Certificate)
+
+In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You can skip this step if you are using your own certificates.
+
+1. Fetch the latest `cert-manager` Helm chart and parse the template for image details:
+
+   > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.9.1, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/).
+
+   ```plain
+   helm repo add jetstack https://charts.jetstack.io
+   helm repo update
+   helm fetch jetstack/cert-manager --version v0.9.1
+   helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt
+   ```
+
+2. Sort and unique the images list to remove any overlap between the sources:
+
+   ```plain
+   sort -u rancher-images.txt -o rancher-images.txt
+   ```
+
+### C. Save the images to your workstation
+
+1. Make `rancher-save-images.sh` an executable:
+   ```
+   chmod +x rancher-save-images.sh
+   ```
+
+1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images:
+   ```plain
+   ./rancher-save-images.sh --image-list ./rancher-images.txt
+   ```
+   **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory.
+
+### D. Populate the private registry
+
+Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script.
+
+1. Log into your private registry if required:
+   ```plain
+   docker login
+   ```
+1. Make `rancher-load-images.sh` an executable:
+   ```
+   chmod +x rancher-load-images.sh
+   ```
+
+1. Use `rancher-load-images.sh` to extract, tag, and push the images from `rancher-images.tar.gz` to your private registry, using `rancher-images.txt` as the image list:
+   ```plain
+   ./rancher-load-images.sh --image-list ./rancher-images.txt --registry
+   ```
+{{% /tab %}}
+{{% tab "Linux and Windows Clusters" %}}
+
+_Available as of v2.3.0_
+
+For Rancher servers that will provision Linux and Windows clusters, there are distinct steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests.
+
+### Windows Steps
+
+The Windows images need to be collected and pushed from a Windows Server workstation.
+
+A. Find the required assets for your Rancher version
+B. Save the images to your Windows Server workstation
+C. Prepare the Docker daemon
+D. Populate the private registry + +{{% accordion label="Collecting and Populating Windows Images into the Private Registry"%}} + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + + | Release File | Description | + | ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | + | `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | + | `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | + | `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + +### B. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. Run `rancher-save-images.ps1` to create a tarball of all the required images: + + ```plain + ./rancher-save-images.ps1 + ``` + + **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + +### C. Prepare the Docker daemon + +1. Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ```json + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... + } + ``` + +### D. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. + +1. Using `powershell`, log into your private registry if required: + + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + + ```plain + ./rancher-load-images.ps1 --registry + ``` + +{{% /accordion %}} + +### Linux Steps + +The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. 
These steps are different from the Linux-only steps because the Linux images that are pushed will actually be manifests that support both Windows and Linux images.
+
+A. Find the required assets for your Rancher version
+B. Collect all the required images
+C. Save the images to your Linux workstation
+D. Populate the private registry + +{{% accordion label="Collecting and Populating Linux Images into the Private Registry" %}} + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section (pictured above), download the following files, which are required to install Rancher in an air gap environment: + + | Release File | Description | + | ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | + | `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | + | `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | + | `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | + | `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### B. Collect all the required images + +1. **For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + + 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.9.1, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.9.1 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + + 2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### C. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + + **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. 
This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### D. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. The `rancher-images.txt` / `rancher-windows-images.txt` image list is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. + +1. Log into your private registry if required: + + ```plain + docker login + ``` + +1. Make `rancher-load-images.sh` an executable: + + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry + ``` + +{{% /accordion %}} + +{{% /tab %}} +{{% /tabs %}} + +### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) + +### [Next: Docker Installs - Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.x/en/installation/options/air-gap-helm2/prepare-nodes/_index.md b/content/rancher/v2.x/en/installation/options/air-gap-helm2/prepare-nodes/_index.md new file mode 100644 index 00000000000..ff9080548ef --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/air-gap-helm2/prepare-nodes/_index.md @@ -0,0 +1,105 @@ +--- +title: '1. Prepare your Node(s)' +weight: 100 +aliases: + - /rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts + - /rancher/v2.x/en/installation/air-gap-single-node/provision-host +--- + +This section is about how to prepare your node(s) to install Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + +# Prerequisites + +{{% tabs %}} +{{% tab "Kubernetes Install (Recommended)" %}} + +### OS, Docker, Hardware, and Networking + +Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +### Private Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). + +### CLI Tools + +The following CLI tools are required for the Kubernetes Install. Make sure these tools are installed on your workstation and available in your `$PATH`. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. 
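+
+As a quick sanity check before proceeding, you can confirm that each of the CLI tools above is installed and on your `PATH`. This is a minimal sketch; the `helm version -c` flag shown here is the Helm 2 client-only check, since this guide targets Helm 2:
+
+```plain
+kubectl version --client   # prints the kubectl client version
+rke --version              # prints the installed RKE version
+helm version -c            # prints the Helm 2 client version without contacting Tiller
+```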
+ +{{% /tab %}} +{{% tab "Docker Install" %}} + +### OS, Docker, Hardware, and Networking + +Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +### Private Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). +{{% /tab %}} +{{% /tabs %}} + +# Set up Infrastructure + +{{% tabs %}} +{{% tab "Kubernetes Install (Recommended)" %}} + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
+ +![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) + +### A. Provision three air gapped Linux hosts according to our requirements + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.x/en/installation/requirements). + +### B. Set up your Load Balancer + +When setting up the Kubernetes cluster that will run the Rancher server components, an Ingress controller pod will be deployed on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. + +You will need to configure a load balancer as a basic Layer 4 TCP forwarder to direct traffic to these ingress controller pods. The exact configuration will vary depending on your environment. + +> **Important:** +> Only use this load balancer (i.e, the `local` cluster Ingress) to load balance the Rancher server. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. + +**Load Balancer Configuration Samples:** + +- For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx) +- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb) + +{{% /tab %}} +{{% tab "Docker Install" %}} + +The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation. + +Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +### A. Provision a single, air gapped Linux host according to our Requirements + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.x/en/installation/requirements). 
+ +{{% /tab %}} +{{% /tabs %}} + +### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md b/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md index 147153aea90..cdb9c07ac57 100644 --- a/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md +++ b/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md @@ -1,10 +1,8 @@ --- -title: API Audit Log +title: Enabling the API Audit Log to Record System Events weight: 10000 aliases: - - /rancher/v2.x/en/installation/api-auditing/ - - /rancher/v2.x/en/admin-settings/api-auditing/ - - /rancher/v2.x/en/admin-settings/api-audit-log/ + - /rancher/v2.x/en/admin-settings/api-auditing/ --- You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. @@ -15,41 +13,42 @@ You can enable API Auditing during Rancher installation or upgrade. The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. -- [Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#api-audit-log) +- [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#api-audit-log) -- [HA Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#api-audit-log) +- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/options/chart-options/#api-audit-log) ## API Audit Log Options The usage below defines rules about what the audit log should record and what data it should include: -Parameter | Description | ----------|----------| - `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata.
`2` - Log event metadata and request body.
`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | - `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host.

Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| - `AUDIT_LOG_MAXAGE` | Defined the maximum number of days to retain old audit log files. Default is 10 days. | - `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. - `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. +| Parameter | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata.
`2` - Log event metadata and request body.
`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | +| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host.

Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| +| `AUDIT_LOG_MAXAGE` | Defined the maximum number of days to retain old audit log files. Default is 10 days. | +| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | +| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. | +
### Audit Log Levels The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. -| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | -| --------------------- | ------------------ | ------------ | ------------------- | ------------------- | -| `0` | | | | | -| `1` | ✓ | | | | -| `2` | ✓ | ✓ | | | -| `3` | ✓ | ✓ | ✓ | ✓ | +| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | +| --------------------- | ---------------- | ------------ | ----------------- | ------------- | +| `0` | | | | | +| `1` | ✓ | | | | +| `2` | ✓ | ✓ | | | +| `3` | ✓ | ✓ | ✓ | ✓ | ## Viewing API Audit Logs -### Single Node Install +### Docker Install Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. -### HA Install +### Kubernetes Install Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. @@ -65,23 +64,23 @@ kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log 1. From the context menu, select **Cluster: local > System**. - ![Local Cluster: System Project]({{< baseurl >}}/img/rancher/audit_logs_gui/context_local_system.png) + ![Local Cluster: System Project]({{}}/img/rancher/audit_logs_gui/context_local_system.png) -1. From the **Workloads** tab, find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. +1. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. - ![Rancher Workload]({{< baseurl >}}/img/rancher/audit_logs_gui/rancher_workload.png) + ![Rancher Workload]({{}}/img/rancher/audit_logs_gui/rancher_workload.png) 1. Pick one of the `rancher` pods and select **Ellipsis (...) > View Logs**. - ![View Logs]({{< baseurl >}}/img/rancher/audit_logs_gui/view_logs.png) + ![View Logs]({{}}/img/rancher/audit_logs_gui/view_logs.png) 1. From the **Logs** drop-down, select `rancher-audit-log`. - ![Select Audit Log]({{< baseurl >}}/img/rancher/audit_logs_gui/rancher_audit_log_container.png) + ![Select Audit Log]({{}}/img/rancher/audit_logs_gui/rancher_audit_log_container.png) #### Shipping the Audit Log -You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging]({{< baseurl >}}/rancher/v2.x/en/tools/logging) for details. +You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging]({{}}/rancher/v2.x/en/tools/logging) for details. 
## Audit Log Samples @@ -93,20 +92,16 @@ If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every ```json { - "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", - "requestURI": "/v3/schemas", - "sourceIPs": [ - "::1" - ], - "user": { - "name": "user-f4tt2", - "group": [ - "system:authenticated" - ] - }, - "verb": "GET", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:22:43 +0800" + "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", + "requestURI": "/v3/schemas", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "GET", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:22:43 +0800" } ``` @@ -114,167 +109,161 @@ If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. - The code sample below depicts an API request, with both its metadata header and body. ```json { - "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": [ - "::1" - ], - "user": { - "name": "user-f4tt2", - "group": [ - "system:authenticated" - ] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:28:08 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - 
"dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, + "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:28:08 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": [ - "10.64.3.58" - ], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my description", - "volumes": [] - } + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + 
"observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my description", + "volumes": [] + } } ``` + ### Metadata, Request Body, and Response Body Level If you set your `AUDIT_LEVEL` to `3`, Rancher logs: @@ -288,160 +277,154 @@ The code sample below depicts an API request, with both its metadata header and ```json { - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": [ - "::1" - ], - "user": { - "name": "user-f4tt2", - "group": [ - "system:authenticated" - ] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum 
availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": [ - "10.64.3.58" - ], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": 
"MinimumReplicasAvailable", + "status": "True", + "type": "Available" }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my decript", - "volumes": [] - } + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my decript", + "volumes": [] + } } ``` @@ -449,151 +432,148 @@ The code sample below depicts an API request, with both its metadata header and The code sample below depicts an API response, with both its metadata header and body. 
- ```json { - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "responseStatus": "200", - "stage": "ResponseComplete", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "responseBody": { - "actionLinks": { - "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", - "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", - "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" - }, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements" - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container" - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "links": { - "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", - "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" - }, + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "responseStatus": "200", + "stage": "ResponseComplete", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "responseBody": { + "actionLinks": { + "pause": 
"https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", + "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", + "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" + }, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, "name": "nginx", - "namespaceId": "default", - "paused": false, - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": [ - "10.64.3.58" - ], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport" - } + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements" }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container" + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "links": { + "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + 
"revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", + "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" + }, + "name": "nginx", + "namespaceId": "default", + "paused": false, + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" } + } } ``` diff --git a/content/rancher/v2.x/en/installation/arm64-platform/_index.md b/content/rancher/v2.x/en/installation/options/arm64-platform/_index.md similarity index 56% rename from content/rancher/v2.x/en/installation/arm64-platform/_index.md rename to content/rancher/v2.x/en/installation/options/arm64-platform/_index.md index 02fd9660303..762d3c1541d 100644 --- a/content/rancher/v2.x/en/installation/arm64-platform/_index.md +++ b/content/rancher/v2.x/en/installation/options/arm64-platform/_index.md @@ -1,6 +1,8 @@ --- title: Running on ARM64 (Experimental) weight: 7600 +aliases: + - /rancher/v2.x/en/installation/arm64-platform --- _Available as of v2.2.0_ @@ -12,16 +14,16 @@ _Available as of v2.2.0_ The following options are available when using an ARM64 platform: - Running Rancher on ARM64 based node(s) - - Only [Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/) + - Only [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) - Create custom cluster and adding ARM64 based node(s) - Kubernetes cluster version must be 1.12 or higher - - CNI Network Provider must be [Flannel]({{< baseurl >}}/rancher/v2.x/en/faq/networking/cni-providers/#flannel) + - CNI Network Provider must be [Flannel]({{}}/rancher/v2.x/en/faq/networking/cni-providers/#flannel) - Importing clusters that contain ARM64 based nodes - Kubernetes cluster version must be 1.12 or higher -Please see [Cluster Options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) how to configure the cluster options. +Please see [Cluster Options]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/) how to configure the cluster options. 
The following features are not tested: -* Monitoring, alerts, notifiers, pipelines and logging -* Launching apps from the catalog +- Monitoring, alerts, notifiers, pipelines and logging +- Launching apps from the catalog diff --git a/content/rancher/v2.x/en/installation/options/chart-options/_index.md b/content/rancher/v2.x/en/installation/options/chart-options/_index.md new file mode 100644 index 00000000000..7bc68cce822 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/chart-options/_index.md @@ -0,0 +1,245 @@ +--- +title: Helm Chart Options for Kubernetes Installations +weight: 276 +--- + +### Common Options + +| Option | Default Value | Description | +| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | +| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | +| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | +| `letsEncrypt.email` | " " | `string` - Your email address | +| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | +| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | + +
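+As a quick reference, the common options can be combined into a single install command. The following is only a sketch using the Helm 2 style syntax shown elsewhere in these docs; the hostname and email address are placeholders:
+
+```plain
+helm install rancher-latest/rancher \
+  --name rancher \
+  --namespace cattle-system \
+  --set hostname=rancher.my.org \
+  --set ingress.tls.source=letsEncrypt \
+  --set letsEncrypt.email=admin@rancher.my.org \
+  --set letsEncrypt.environment=production
+```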
+ +### Advanced Options + +| Option | Default Value | Description | +| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | +| `addLocal` | "auto" | `string` - Have Rancher detect and import the "local" Rancher server cluster [Import "local Cluster](#import-local-cluster) | +| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | +| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | +| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. [0-3] | +| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | +| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | +| `debug` | false | `bool` - set debug flag on rancher server | +| `certmanager.version` | "" | `string` - set cert-manager compatibility + | +| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | +| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | +| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | +| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | +| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | +| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" | `string` - comma separated list of hostnames or ip address not to use the proxy | +| `resources` | {} | `map` - rancher pod resource requests & limits | +| `rancherImage` | "rancher/rancher" | `string` - rancher image source | +| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | +| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | +| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ _Available as of v2.3.0_ | +| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. _Available as of v2.3.0_ | + +
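+For example, the `auditLog` options above work together when the destination is set to `hostPath` (see the next section for details). The following is only a sketch; the retention values are illustrative rather than defaults:
+
+```plain
+--set auditLog.level=1
+--set auditLog.destination=hostPath
+--set auditLog.hostPath=/var/log/rancher/audit
+--set auditLog.maxAge=5
+--set auditLog.maxBackups=5
+--set auditLog.maxSize=50
+```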
+ +### API Audit Log + +Enabling the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing/). + +You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/tools/logging/) for the `System` Project on the Rancher server cluster. + +```plain +--set auditLog.level=1 +``` + +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/tools/logging/) for the Rancher server cluster or System Project. + +Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. + +### Setting Extra Environment Variables + +_Available as of v2.2.0_ + +You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +### TLS settings + +_Available as of v2.2.0_ + +To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. + +### Import `local` Cluster + +By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. + +If this is a concern in your environment you can set this option to "false" on your initial install. + +> Note: This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. + +```plain +--set addLocal="false" +``` + +### Customizing your Ingress + +To customize or use a different ingress with Rancher server you can set your own Ingress annotations. + +Example on setting a custom certificate issuer: + +```plain +--set ingress.extraAnnotations.'certmanager\.k8s\.io/cluster-issuer'=ca-key-pair +``` + +_Available as of v2.0.15, v2.1.10 and v2.2.4_ + +Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. + +```plain +--set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' +``` + +### HTTP Proxy + +Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. + +Add your IP exceptions to the `noProxy` list. Make sure you add the Service cluster IP range (default: 10.43.0.1/16) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. 
+ +```plain +--set proxy="http://:@:/" +--set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16" +``` + +### Additional Trusted CAs + +If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. + +```plain +--set additionalTrustedCAs=true +``` + +Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. + +```plain +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem +``` + +### Private Registry and Air Gap Installs + +For details on installing Rancher with a private registry, see: + +- [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) +- [Air Gap: Kubernetes Install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) + +### External TLS Termination + +We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. + +You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. + +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.x/en/installation/options/tls-secrets/#using-a-private-ca-signed-certificate) to add the CA cert for Rancher. + +Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. + +#### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: 'true' +``` + +#### Required Headers + +- `Host` +- `X-Forwarded-Proto` +- `X-Forwarded-Port` +- `X-Forwarded-For` + +#### Recommended Timeouts + +- Read Timeout: `1800 seconds` +- Write Timeout: `1800 seconds` +- Connect Timeout: `30 seconds` + +#### Health Checks + +Rancher will respond `200` to health checks on the `/healthz` endpoint. + +#### Example NGINX config + +This NGINX configuration is tested on NGINX 1.14. + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. 
+- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server IP_NODE_1:80; + server IP_NODE_2:80; + server IP_NODE_3:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` diff --git a/content/rancher/v2.x/en/installation/options/custom-ca-root-certificate/_index.md b/content/rancher/v2.x/en/installation/options/custom-ca-root-certificate/_index.md index da2ca3d6bd3..69a2a82944e 100644 --- a/content/rancher/v2.x/en/installation/options/custom-ca-root-certificate/_index.md +++ b/content/rancher/v2.x/en/installation/options/custom-ca-root-certificate/_index.md @@ -1,10 +1,10 @@ --- -title: Custom CA root certificate +title: About Custom CA Root Certificates weight: 1110 aliases: - /rancher/v2.x/en/installation/custom-ca-root-certificate/ - - /rancher/v2.x/en/admin-settings/custom-ca-root-certificate/ --- + If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. 
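+
+To confirm that a service is presenting a certificate signed by a private CA, you can inspect the certificate chain it serves. The following is only a sketch using `openssl`; the hostname is a placeholder for the service that Rancher needs to access:
+
+```plain
+openssl s_client -connect git.example.com:443 -showcerts </dev/null 2>/dev/null | openssl x509 -noout -issuer -subject
+```
+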
@@ -13,14 +13,15 @@ To validate the certificate, the CA root certificates need to be added to Ranche Examples of services that Rancher can access: -* Catalogs -* Authentication providers -* Accessing hosting/cloud API when using Node Drivers +- Catalogs +- Authentication providers +- Accessing hosting/cloud API when using Node Drivers ## Installing with the custom CA Certificate For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: -- [Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#custom-ca-certificate) +- [Docker Install]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#custom-ca-certificate) + +- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/options/chart-options/#additional-trusted-cas) -- [HA Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#additional-trusted-cas) diff --git a/content/rancher/v2.x/en/installation/options/etcd/_index.md b/content/rancher/v2.x/en/installation/options/etcd/_index.md index 8cc87fc3954..2f99cd9737f 100644 --- a/content/rancher/v2.x/en/installation/options/etcd/_index.md +++ b/content/rancher/v2.x/en/installation/options/etcd/_index.md @@ -1,45 +1,41 @@ --- -title: etcd Advanced Configurations -weight: 1 +title: Tuning etcd for Large Installations +weight: 3 aliases: --- -## Tuning etcd for large installations ## - When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. -The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.3.12/op-guide/maintenance/#space-quota) setting on the etcd servers. +The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. -### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB ### +### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB ```yaml # RKE cluster.yml -... +--- services: etcd: extra_args: quota-backend-bytes: 5368709120 -... 
``` -## Scaling etcd disk performance ## +## Scaling etcd disk performance -You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.3.12/tuning/#disk) on how to tune the disk priority on the host. +You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.4.0/tuning/#disk) on how to tune the disk priority on the host. Additionally, to reduce IO contention on the disks for etcd, you can use a dedicated device for the data and wal directory. Based on etcd best practices, mirroring RAID configurations are unnecessary because etcd replicates data between the nodes in the cluster. You can use stripping RAID configurations to increase available IOPS. -To implement this solution in an RKE cluster, the `/var/lib/etcd/data` and `/var/lib/etc/wal` directories will need to have disks mounted and formmated on the underlying host. In the `extra_args` directive of the `etcd` service, you must include the `wal_dir` directory. Without specifying the `wal_dir`, etcd process will try to manipulate the underlying `wal` mount with insufficient permissions. +To implement this solution in an RKE cluster, the `/var/lib/etcd/data` and `/var/lib/etc/wal` directories will need to have disks mounted and formatted on the underlying host. In the `extra_args` directive of the `etcd` service, you must include the `wal_dir` directory. Without specifying the `wal_dir`, etcd process will try to manipulate the underlying `wal` mount with insufficient permissions. ```yaml # RKE cluster.yml -... +--- services: etcd: extra_args: - data-dir: "/var/lib/rancher/etcd/data/" - wal-dir: "/var/lib/rancher/etcd/wal/wal_dir" + data-dir: '/var/lib/rancher/etcd/data/' + wal-dir: '/var/lib/rancher/etcd/wal/wal_dir' extra_binds: - - "/var/lib/etcd/data:/var/lib/rancher/etcd/data" - - "/var/lib/etcd/wal:/var/lib/rancher/etcd/wal" -... -``` \ No newline at end of file + - '/var/lib/etcd/data:/var/lib/rancher/etcd/data' + - '/var/lib/etcd/wal:/var/lib/rancher/etcd/wal' +``` diff --git a/content/rancher/v2.x/en/installation/options/feature-flags/_index.md b/content/rancher/v2.x/en/installation/options/feature-flags/_index.md new file mode 100644 index 00000000000..655598c04a5 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/feature-flags/_index.md @@ -0,0 +1,148 @@ +--- +title: Enabling Experimental Features +weight: 8000 +--- + +_Available as of v2.3.0_ + +Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. + +The features can be enabled in three ways: + +- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. +- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) in Rancher v2.3.3+ by going to the **Settings** page. +- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. 
+ +Each feature has two values: + +- A default value, which can be configured with a flag or environment variable from the command line +- A set value, which can be configured with the Rancher API or UI + +If no value has been set, Rancher uses the default value. + +Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. + +For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. + +The following is a list of the feature flags available in Rancher: + +- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. +- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. + +The below table shows the availability and default value for feature flags in Rancher: + +| Feature Flag Name | Default Value | Status | Available as of | +| ----------------------------- | ------------- | ------------ | --------------- | +| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | +| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | +| `istio-virtual-service-ui` | `true` | GA | v2.3.2 | + +# Enabling Features when Starting Rancher + +When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. + +> **Note:** Values set from the Rancher API will override the value passed in through the command line. + +{{% tabs %}} +{{% tab "Kubernetes Install" %}} +When installing Rancher with a Helm chart, use the `--features` option. In the below example, two features are enabled by passing the feature flag names names in a comma separated list: + +``` +helm install rancher-latest/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 +``` + +### Rendering the Helm Chart for Air Gap Installations + +For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher) + +Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. + +The Helm 3 command is as follows: + +``` +helm template rancher ./rancher-.tgz --output-dir . 
\ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 +``` + +The Helm 2 command is as follows: + +``` +helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 +``` + +{{% /tab %}} +{{% tab "Docker Install" %}} +When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: + +``` +docker run -d -p 80:80 -p 443:443 \ + --restart=unless-stopped \ + rancher/rancher:rancher-latest \ + --features==true,=true # Available as of v2.3.0 +``` + +{{% /tab %}} +{{% /tabs %}} + +# Enabling Features with the Rancher UI + +_Available as of Rancher v2.3.3_ + +1. Go to the **Global** view and click **Settings.** +1. Click the **Feature Flags** tab. You will see a list of experimental features. +1. To enable a feature, go to the disabled feature you want to enable and click **Ellipsis (...) > Activate.** + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher UI + +1. Go to the **Global** view and click **Settings.** +1. Click the **Feature Flags** tab. You will see a list of experimental features. +1. To disable a feature, go to the enabled feature you want to disable and click **Ellipsis (...) > Deactivate.** + +**Result:** The feature is disabled. + +# Enabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit.** +1. In the **Value** drop-down menu, click **True.** +1. Click **Show Request.** +1. Click **Send Request.** +1. Click **Close.** + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit.** +1. In the **Value** drop-down menu, click **False.** +1. Click **Show Request.** +1. Click **Send Request.** +1. Click **Close.** + +**Result:** The feature is disabled. 
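+
+You can also script the same toggle against the API instead of clicking through it. The following is only a sketch using `curl`; the server URL, API token, and feature name are placeholders, and the `value` field mirrors the **Value** drop-down described in the steps above:
+
+```plain
+curl -sk -u "token-xxxxx:<secret>" \
+  -X PUT \
+  -H 'Content-Type: application/json' \
+  -d '{"value": true}' \
+  https://<rancher-server>/v3/features/unsupported-storage-drivers
+```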
diff --git a/content/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/_index.md b/content/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/_index.md new file mode 100644 index 00000000000..8d254d8bd6d --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/_index.md @@ -0,0 +1,43 @@ +--- +title: Allow Unsupported Storage Drivers +weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/feature-flags/enable-not-default-storage-drivers +--- +_Available as of v2.3.0_ + +This feature allows you to use types for storage providers and provisioners that are not enabled by default. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) + +Environment Variable Key | Default Value | Description +---|---|--- + `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. + +### Types for Persistent Volume Plugins that are Enabled by Default +Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +### Types for StorageClass that are Enabled by Default +Below is a list of storage types for a StorageClass that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|-------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui/_index.md new file mode 100644 index 00000000000..f631b54d2ee --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui/_index.md @@ -0,0 +1,34 @@ +--- +title: UI for Istio Virtual Services and Destination Rules +weight: 2 +aliases: + - /rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui +--- +_Available as of v2.3.0_ + +This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. + +> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) in order to use the feature. 
+ +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/installation/options/feature-flags/) + +Environment Variable Key | Default Value | Status | Available as of +---|---|---|--- +`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `true` | GA | v2.3.2 + +# About this Feature + +A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. + +When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. + +The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** + +- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) +- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) + +To see these tabs, + +1. Go to the project view in Rancher and click **Resources > Istio.** +1. You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/options/firewall/_index.md b/content/rancher/v2.x/en/installation/options/firewall/_index.md new file mode 100644 index 00000000000..601d8c046ee --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/firewall/_index.md @@ -0,0 +1,106 @@ +--- +title: Opening Ports with firewalld +weight: 12000 +--- + +Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. 
+ +For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: + +``` +Chain INPUT (policy ACCEPT) +target prot opt source destination +ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +ACCEPT icmp -- anywhere anywhere +ACCEPT all -- anywhere anywhere +ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain FORWARD (policy ACCEPT) +target prot opt source destination +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +``` + +You can check the default firewall rules with this command: + +``` +sudo iptables --list +``` + +This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.x/en/installation/references) for nodes in a high-availability Rancher server cluster. + +# Prerequisite + +Install v7.x or later of `firewalld`: + +``` +yum install firewalld +systemctl start firewalld +systemctl enable firewalld +``` + +# Applying Firewall Port Rules + +In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. If your Rancher server nodes have all three roles, run the following commands on each node: + +``` +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` +If your Rancher server nodes have separate roles, use the following commands based on the role of the node: + +``` +# For etcd nodes, run the following commands: +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp + +# For control plane nodes, run the following commands: +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp + +# For worker nodes, run the following commands: +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` + +After the `firewall-cmd` commands have been run on a node, 
use the following command to enable the firewall rules: + +``` +firewall-cmd --reload +``` + +**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/options/helm-version/_index.md b/content/rancher/v2.x/en/installation/options/helm-version/_index.md new file mode 100644 index 00000000000..11900d73e09 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm-version/_index.md @@ -0,0 +1,14 @@ +--- +title: Helm Version Requirements +weight: 400 +aliases: +- /rancher/v2.x/en/installation/helm-version +--- + +This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. + +> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.x/en/installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. +- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. +- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/content/rancher/v2.x/en/installation/options/helm2/_index.md b/content/rancher/v2.x/en/installation/options/helm2/_index.md new file mode 100644 index 00000000000..cb60fb18d6d --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/_index.md @@ -0,0 +1,58 @@ +--- +title: Kubernetes Installation Using Helm 2 +weight: 1 +--- + +> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. +> +> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. +> +> This section provides a copy of the older high-availability Kubernetes Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +For production environments, we recommend installing Rancher in a high-availability configuration so that your user base can always access Rancher Server. When installed in a Kubernetes cluster, Rancher will integrate with the cluster's etcd database and take advantage of Kubernetes scheduling for high-availability. + +This procedure walks you through setting up a 3-node cluster with Rancher Kubernetes Engine (RKE) and installing the Rancher chart with the Helm package manager. + +> **Important:** The Rancher management server can only be run on an RKE-managed Kubernetes cluster. Use of Rancher on hosted Kubernetes or other providers is not supported. + +> **Important:** For the best performance, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. 
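+
+Before following these steps, it can help to confirm which major version of Helm is installed on your workstation, since the commands in this section assume the Helm 2 client and Tiller. This quick check is an illustrative addition, not part of the original instructions:
+
+```
+# Helm 2 reports separate Client and Server (Tiller) versions, e.g. v2.16.x;
+# Helm 3 reports a single client version, e.g. v3.x.y, and has no Tiller.
+helm version --short
+```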
+ +## Recommended Architecture + +- DNS for Rancher should resolve to a Layer 4 load balancer (TCP) +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
+![High-availability Kubernetes Install]({{}}/img/rancher/ha/rancher2ha.svg) +Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers + +## Required Tools + +The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + +## Installation Outline + +- [Create Nodes and Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/) +- [Install Kubernetes with RKE]({{}}/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/) +- [Initialize Helm (tiller)]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) +- [Install Rancher]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) + +## Additional Install Options + +- [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) + +## Previous Methods + +[RKE add-on install]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/) + +> **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. diff --git a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md b/content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/_index.md similarity index 72% rename from content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/_index.md index ade47ce26fe..cd5da9e8763 100644 --- a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/_index.md @@ -9,9 +9,9 @@ Use your provider of choice to provision 3 nodes and a Load Balancer endpoint fo ### Node Requirements -View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). +View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements]({{}}/rancher/v2.x/en/installation/requirements). -View the OS requirements for RKE at [RKE Requirements]({{< baseurl >}}/rke/latest/en/os/) +View the OS requirements for RKE at [RKE Requirements]({{}}/rke/latest/en/os/) ### Load Balancer @@ -24,7 +24,7 @@ Configure a load balancer as a basic Layer 4 TCP forwarder. 
The exact configurat #### Examples -* [Nginx]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/nginx/) -* [Amazon NLB]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/) +* [Nginx]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nginx/) +* [Amazon NLB]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nlb/) -### [Next: Install Kubernetes with RKE]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/) +### [Next: Install Kubernetes with RKE]({{}}/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/) diff --git a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nginx/_index.md similarity index 92% rename from content/rancher/v2.x/en/installation/ha/create-nodes-lb/nginx/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nginx/_index.md index 469541db519..253ca02b735 100644 --- a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nginx/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nginx/_index.md @@ -21,7 +21,7 @@ After installing NGINX, you need to update the NGINX configuration file, `nginx. 1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. -2. From `nginx.conf`, replace both occurences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/). +2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{}}/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/). >**Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. diff --git a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nlb/_index.md similarity index 85% rename from content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nlb/_index.md index b5d4f4fcdf7..c22d03a5739 100644 --- a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/create-nodes-lb/nlb/_index.md @@ -18,6 +18,7 @@ Configuring an Amazon NLB is a multistage process. We've broken it down into mul Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. +> **Note:** Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ELB or ALB. ## Create Target Groups @@ -27,7 +28,7 @@ Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get st The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. 
-![EC2 Load Balancing section]({{< baseurl >}}/img/rancher/ha/nlb/ec2-loadbalancing.png) +{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} Click **Create target group** to create the first target group, regarding TCP port 443. @@ -53,11 +54,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 443 settings**
-![Target group 443]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}}
**Screenshot Target group TCP port 443 Advanced settings**
-![Target group 443 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}}
@@ -85,11 +86,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 80 settings**
-![Target group 80]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}}
**Screenshot Target group TCP port 80 Advanced settings**
-![Target group 80 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}}
@@ -99,19 +100,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -![Edit target group 443]({{< baseurl >}}/img/rancher/ha/nlb/edit-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} Select the instances (Linux nodes) you want to add, and click **Add to registered**.
**Screenshot Add targets to target group TCP port 443**
-![Add targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/add-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}}
**Screenshot Added targets to target group TCP port 443**
-![Added targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/added-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} When the instances are added, click **Save** on the bottom right of the screen. diff --git a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-init/_index.md similarity index 72% rename from content/rancher/v2.x/en/installation/ha/helm-init/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/helm-init/_index.md index 3dc99fdcd48..3eefe165e48 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-init/_index.md @@ -1,12 +1,16 @@ --- -title: "3. Initialize Helm (Install Tiller)" +title: "Initialize Helm: Install the Tiller Service" +description: "With Helm, you can create configurable deployments instead of using static files. In order to use Helm, the Tiller service needs to be installed on your cluster." weight: 195 --- - Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. -> **Note:** For systems without direct internet access see [Helm - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#helm) for install details. +For systems without direct internet access, see [Helm - Air Gap]({{}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#helm) for install details. + +Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) ### Install Tiller on the Cluster @@ -57,6 +61,6 @@ Server: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b ### Issues or errors? -See the [Troubleshooting]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/) page. +See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/troubleshooting/) page. 
-### [Next: Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/) +### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-init/troubleshooting/_index.md similarity index 89% rename from content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/helm-init/troubleshooting/_index.md index c73013b5cb8..6dd085454eb 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-init/troubleshooting/_index.md @@ -20,4 +20,4 @@ helm version --server Error: could not find tiller ``` -When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) to install `tiller` with the correct `ServiceAccount`. +When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) to install `tiller` with the correct `ServiceAccount`. diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md similarity index 53% rename from content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md index 0e97eb52adc..0690c435343 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/_index.md @@ -5,11 +5,15 @@ weight: 200 Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher. -> **Note:** For systems without direct internet access see [Air Gap: High Availability Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). +For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). + +Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) ### Add the Helm Chart Repository -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#helm-chart-repositories). +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). 
{{< release-channel >}} @@ -23,7 +27,7 @@ Rancher Server is designed to be secure by default and requires SSL/TLS configur There are three recommended options for the source of the certificate. -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#external-tls-termination). +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). | Configuration | Chart option | Description | Requires cert-manager | |-----|-----|-----|-----| @@ -33,29 +37,64 @@ There are three recommended options for the source of the certificate. ### Optional: Install cert-manager -> **Note:** cert-manager is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#external-tls-termination). +**Note:** cert-manager is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). -> **Important:** Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. +> **Important:** +> Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. -Rancher relies on [cert-manager](https://github.com/kubernetes/charts/tree/master/stable/cert-manager) version v0.5.2 from the official Kubernetes Helm chart repository to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.9.1, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). + +Rancher relies on [cert-manager](https://github.com/jetstack/cert-manager) to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. + +These instructions are adapted from the [official cert-manager documentation](https://docs.cert-manager.io/en/latest/getting-started/install/kubernetes.html#installing-with-helm). -Install `cert-manager` from Kubernetes Helm chart repository. +1. Install the CustomResourceDefinition resources separately + ```plain + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml + ``` + +1. Create the namespace for cert-manager + ```plain + kubectl create namespace cert-manager + ``` + +1. Label the cert-manager namespace to disable resource validation + ```plain + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + ``` + +1. Add the Jetstack Helm repository + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. 
Update your local Helm chart repository cache + ```plain + helm repo update + ``` + +1. Install the cert-manager Helm chart + ```plain + helm install \ + --name cert-manager \ + --namespace cert-manager \ + --version v0.9.1 \ + jetstack/cert-manager + ``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: ``` -helm install stable/cert-manager \ - --name cert-manager \ - --namespace kube-system \ - --version v0.5.2 +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m +cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m +cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m ``` -Wait for `cert-manager` to be rolled out: - -``` -kubectl -n kube-system rollout status deploy/cert-manager -Waiting for deployment "cert-manager" rollout to finish: 0 of 1 updated replicas are available... -deployment "cert-manager" successfully rolled out -``` +If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check the [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide.
@@ -112,7 +151,7 @@ deployment "rancher" successfully rolled out Create Kubernetes secrets from your own certificates for Rancher to use. -> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{< baseurl >}}/rancher/v2.x/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) +> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.x/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - Set `hostname` and set `ingress.tls.source` to `secret`. - If you are using a Private CA signed certificate , add `--set privateCA=true` to the command shown below. @@ -125,7 +164,7 @@ helm install rancher-/rancher \ --set ingress.tls.source=secret ``` -Now that Rancher is deployed, see [Adding TLS Secrets]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. +Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. After adding the secrets, check if Rancher was rolled out successfully: @@ -149,11 +188,11 @@ It should show the same count for `DESIRED` and `AVAILABLE`. The Rancher chart configuration has many options for customizing the install to suit your specific environment. Here are some common advanced scenarios. -* [HTTP Proxy]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#http-proxy) -* [Private Docker Image Registry]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#private-registry-and-air-gap-installs) -* [TLS Termination on an External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#external-tls-termination) +* [HTTP Proxy]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#http-proxy) +* [Private Docker Image Registry]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#private-registry-and-air-gap-installs) +* [TLS Termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination) -See the [Chart Options]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/) for the full list of options. +See the [Chart Options]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/) for the full list of options. ### Save your options @@ -163,4 +202,4 @@ Make sure you save the `--set` options you used. 
You will need to use the same o That's it you should have a functional Rancher server. Point a browser at the hostname you picked and you should be greeted by the colorful login page. -Doesn't work? Take a look at the [Troubleshooting]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/) Page +Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/troubleshooting/) Page diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md similarity index 83% rename from content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md index 8ded6f0ac25..b9940f9cac5 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/_index.md @@ -24,7 +24,7 @@ weight: 276 | `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | | `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | | `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. [0-3] | +| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. [0-3] | | `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | @@ -40,20 +40,22 @@ weight: 276 | `rancherImage` | "rancher/rancher" | `string` - rancher image source | | `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | | `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | +| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ _Available as of v2.3.0_ | +| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. _Available as of v2.3.0_
### API Audit Log -Enabling the [API Audit Log]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing/). +Enabling the [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing/). -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{< baseurl >}}/rancher/v2.x/en/tools/logging/) for the `System` Project on the Rancher server cluster. +You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/tools/logging/) for the `System` Project on the Rancher server cluster. ```plain --set auditLog.level=1 ``` -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{< baseurl >}}/rancher/v2.x/en/tools/logging/) for the Rancher server cluster or System Project. +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.x/en/tools/logging/) for the Rancher server cluster or System Project. Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. @@ -64,8 +66,8 @@ _Available as of v2.2.0_ You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. ```plain ---set 'extraEnv[0].name=CATTLE_SYSTEM_DEFAULT_REGISTRY' ---set 'extraEnv[0].value=http://registry.example.com/' +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' ``` ### TLS settings @@ -79,7 +81,7 @@ To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` a --set 'extraEnv[0].value=1.0' ``` -See [TLS settings]({{< baseurl >}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. +See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. 
### Import `local` Cluster @@ -140,8 +142,8 @@ kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca- For details on installing Rancher with a private registry, see: -- [Air Gap: Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/) -- [Air Gap: High Availability Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/) +- [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) +- [Air Gap: Kubernetes Install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) ### External TLS Termination @@ -150,10 +152,21 @@ We recommend configuring your load balancer as a Layer 4 balancer, forwarding pl You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/#using-a-private-ca-signed-certificate) to add the CA cert for Rancher. +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/#using-a-private-ca-signed-certificate) to add the CA cert for Rancher. Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. +#### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + #### Required Headers * `Host` @@ -179,7 +192,7 @@ This NGINX configuration is tested on NGINX 1.14. >**Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). * Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -* Replace both occurences of `FQDN` to the DNS name for Rancher. +* Replace both occurrences of `FQDN` to the DNS name for Rancher. * Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. 
``` diff --git a/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md new file mode 100644 index 00000000000..8920125b9f8 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/tls-secrets/_index.md @@ -0,0 +1,32 @@ +--- +title: Adding Kubernetes TLS Secrets +description: Read about how to populate the Kubernetes TLS secret for a Rancher installation +weight: 276 +--- + +Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. + +Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. + +Use `kubectl` with the `tls` secret type to create the secrets. + +``` +kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. + +### Using a Private CA Signed Certificate + +If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. + +Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. + +>**Important:** Make sure the file is called `cacerts.pem` as Rancher uses that filename to configure the CA certificate. 
+ +``` +kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem +``` diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/troubleshooting/_index.md similarity index 90% rename from content/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/helm-rancher/troubleshooting/_index.md index 46a3508e593..d5ef3d045f6 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/helm-rancher/troubleshooting/_index.md @@ -124,10 +124,10 @@ W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret ### no matches for kind "Issuer" -The [SSL configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/#choose-your-ssl-configuration) option you have chosen requires [cert-manager]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/#optional-install-cert-manager) to be installed before installing Rancher or else the following error is shown: +The [SSL configuration]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/#choose-your-ssl-configuration) option you have chosen requires [cert-manager]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/#optional-install-cert-manager) to be installed before installing Rancher or else the following error is shown: ``` Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1" ``` -Install [cert-manager]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/#optional-install-cert-manager) and try installing Rancher again. +Install [cert-manager]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/#optional-install-cert-manager) and try installing Rancher again. diff --git a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md b/content/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/_index.md similarity index 83% rename from content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/_index.md index b57911e5fb2..a88ad2801d9 100644 --- a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/_index.md @@ -5,7 +5,7 @@ weight: 190 Use RKE to install Kubernetes with a high availability etcd configuration. ->**Note:** For systems without direct internet access see [Air Gap: High Availability Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/) for install details. +>**Note:** For systems without direct internet access see [Air Gap: Kubernetes install]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/) for install details. ### Create the `rancher-cluster.yml` File @@ -50,9 +50,9 @@ services: RKE has many configuration options for customizing the install to suit your specific environment. -Please see the [RKE Documentation]({{< baseurl >}}/rke/latest/en/config-options/) for the full list of options and capabilities. +Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. -For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide]({{< baseurl >}}/rancher/v2.x/en/installation/options/etcd/). 
+For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide]({{}}/rancher/v2.x/en/installation/options/etcd/). ### Run RKE @@ -120,11 +120,11 @@ kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed Save a copy of the following files in a secure location: - `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{< baseurl >}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ ### Issues or errors? -See the [Troubleshooting]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/troubleshooting/) page. +See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/troubleshooting/) page. -### [Next: Initialize Helm (Install tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) +### [Next: Initialize Helm (Install tiller)]({{}}/rancher/v2.x/en/installation/options/helm2/helm-init/) diff --git a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/troubleshooting/_index.md similarity index 100% rename from content/rancher/v2.x/en/installation/ha/kubernetes-rke/troubleshooting/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/kubernetes-rke/troubleshooting/_index.md diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/_index.md new file mode 100644 index 00000000000..d540cb07167 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/_index.md @@ -0,0 +1,16 @@ +--- +title: RKE Add-On Install +weight: 276 +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + + +* [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb) +* [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb) +* [HTTP Proxy Configuration for a Kubernetes installation]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/proxy/) +* [Troubleshooting RKE Add-on Installs]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/) diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/_index.md new file mode 100644 index 00000000000..a99f7966714 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/_index.md @@ -0,0 +1,55 @@ +--- +title: Enable API Auditing +weight: 300 +aliases: + - /rke/latest/en/config-options/add-ons/api-auditing/ +--- + +>**Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. 
You can know what happened, when it happened, who initiated it, and what cluster it affected. API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. + +## In-line Arguments + +Enable API Auditing using RKE by adding arguments to your Rancher container. + +To enable API auditing: + +- Add API Auditing arguments (`args`) to your Rancher container. +- Declare a `mountPath` in the `volumeMounts` directive of the container. +- Declare a `path` in the `volumes` directive. + +For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing). + +```yaml +... +containers: + - image: rancher/rancher:latest + imagePullPolicy: Always + name: cattle-server + args: ["--audit-log-path", "/var/log/auditlog/rancher-api-audit.log", "--audit-log-maxbackup", "5", "--audit-log-maxsize", "50", "--audit-level", "2"] + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + volumeMounts: + - mountPath: /etc/rancher/ssl + name: cattle-keys-volume + readOnly: true + - mountPath: /var/log/auditlog + name: audit-log-dir + volumes: + - name: cattle-keys-volume + secret: + defaultMode: 420 + secretName: cattle-keys-server + - name: audit-log-dir + hostPath: + path: /var/log/rancher/auditlog + type: Directory +``` diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/_index.md new file mode 100644 index 00000000000..fecaef3d2b3 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/_index.md @@ -0,0 +1,402 @@ +--- +title: Kubernetes Install with External Load Balancer (TCP/Layer 4) +weight: 275 +aliases: +- /rancher/v2.x/en/installation/k8s-install-server-install/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. + +This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: + +- Layer 4 load balancer (TCP) +- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) + +In a Kubernetes setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. 
+ +Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers +![High-availability Kubernetes installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) + +## Installation Outline + +Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. + + + +- [1. Provision Linux Hosts](#1-provision-linux-hosts) +- [2. Configure Load Balancer](#2-configure-load-balancer) +- [3. Configure DNS](#3-configure-dns) +- [4. Install RKE](#4-install-rke) +- [5. Download RKE Config File Template](#5-download-rke-config-file-template) +- [6. Configure Nodes](#6-configure-nodes) +- [7. Configure Certificates](#7-configure-certificates) +- [8. Configure FQDN](#8-configure-fqdn) +- [9. Configure Rancher version](#9-configure-rancher-version) +- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) +- [11. Run RKE](#11-run-rke) +- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) + + + +
+ +## 1. Provision Linux Hosts + +Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). + +## 2. Configure Load Balancer + +We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration]({{}}/rancher/v2.x/en/installation/k8s-install-server-install/nlb/) + +>**Note:** +> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. +> +>One caveat: do not use one of your Rancher nodes as the load balancer. + +### A. Install NGINX + +Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. + +### B. Create NGINX Configuration + +After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. + +1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. + +2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). + + >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + + **Example NGINX config:** + ``` + worker_processes 4; + worker_rlimit_nofile 40000; + + events { + worker_connections 8192; + } + + http { + server { + listen 80; + return 301 https://$host$request_uri; + } + } + + stream { + upstream rancher_servers { + least_conn; + server IP_NODE_1:443 max_fails=3 fail_timeout=5s; + server IP_NODE_2:443 max_fails=3 fail_timeout=5s; + server IP_NODE_3:443 max_fails=3 fail_timeout=5s; + } + server { + listen 443; + proxy_pass rancher_servers; + } + } + ``` + +3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. + +4. Load the updates to your NGINX configuration by running the following command: + + ``` + # nginx -s reload + ``` + +### Option - Run NGINX as Docker container + +Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/nginx.conf:/etc/nginx/nginx.conf \ + nginx:1.14 +``` + +## 3. Configure DNS + +Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

+ +1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). + +2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: + + `nslookup HOSTNAME.DOMAIN.COM` + + **Step Result:** Terminal displays output similar to the following: + + ``` + $ nslookup rancher.yourdomain.com + Server: YOUR_HOSTNAME_IP_ADDRESS + Address: YOUR_HOSTNAME_IP_ADDRESS#53 + + Non-authoritative answer: + Name: rancher.yourdomain.com + Address: HOSTNAME.DOMAIN.COM + ``` + +<br/>
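If `dig` is installed on your workstation, it gives a terser view of the same record. This is only an illustrative check, not part of the original procedure; the availability of `dig` and the FQDN below are assumptions:

```
# Assumes the dig utility is installed; rancher.yourdomain.com is a placeholder FQDN.
# The output should be the IP address of your load balancer.
dig +short rancher.yourdomain.com
```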
+ +## 4. Install RKE + +RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to set up our cluster and run Rancher. + +1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. + +2. Confirm that RKE is now executable by running the following command: + + ``` + rke --version + ``` + +## 5. Download RKE Config File Template + +RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are two templates to choose from, depending on the SSL certificate you want to use. + +1. Download one of the following templates, depending on the SSL certificate you're using. + + - [Template for self-signed certificate<br/>
`3-node-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate.yml) + - [Template for certificate signed by recognized CA
`3-node-certificate-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate-recognizedca.yml) + + >**Advanced Config Options:** + > + >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). + + +2. Rename the file to `rancher-cluster.yml`. + +## 6. Configure Nodes + +Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. + +1. Open `rancher-cluster.yml` in your favorite text editor. + +1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). + + For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docket socket, you can test this by logging in with the specified user and run `docker ps`. + + >**Note:** + > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. + + nodes: + # The IP address or hostname of the node + - address: IP_ADDRESS_1 + # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) + # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 + user: USER + role: [controlplane,etcd,worker] + # Path the SSH key that can be used to access to node with the specified user + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_2 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_3 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + +1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. + + services: + etcd: + backup: false + + +## 7. Configure Certificates + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +Choose from the following options: + +{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} + +>**Prerequisites:** +>Create a self-signed certificate. +> +>- The certificate files must be in [PEM format](#pem). +>- The certificate files must be encoded in [base64](#base64). +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Intermediate Certificates](#cert-order). + +1. 
In `kind: Secret` with `name: cattle-keys-ingress`: + + * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) + * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) + + >**Note:** + > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. + + **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-ingress + namespace: cattle-system + type: Opaque + data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + ``` + +2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). + + >**Note:** + > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
+ + + **Step Result:** The file should look like the example below (the base64 encoded string should be different): + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + ``` + +{{% /accordion %}} + +{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} +>**Note:** +> If you are using Self Signed Certificate, [click here](#option-a-bring-your-own-certificate-self-signed) to proceed. + +If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the [intermediate certificates](#cert-order) in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. + +In the `kind: Secret` with `name: cattle-keys-ingress`: + +* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) +* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) + +After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + +>**Note:** +> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
+ +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cattle-keys-ingress + namespace: cattle-system +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTB
PelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +``` + +{{% /accordion %}} + + + +## 8. Configure FQDN + +There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). + +In the `kind: Ingress` with `name: cattle-ingress-http`: + +* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). + +After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): + +```yaml + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: rancher.yourdomain.com + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + tls: + - secretName: cattle-keys-ingress + hosts: + - rancher.yourdomain.com +``` + +Save the `.yml` file and close it. + +## 9. Configure Rancher version + +The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. + +``` + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:v2.0.6 + imagePullPolicy: Always +``` + +## 10. Back Up Your RKE Config File + +After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. + +## 11. Run RKE + +With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. + +1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. + +2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. + +3. Enter one of the `rke up` commands listen below. + +``` +rke up --config rancher-cluster.yml +``` + +**Step Result:** The output should be similar to the snippet below: + +``` +INFO[0000] Building Kubernetes cluster +INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] +INFO[0000] [network] Deploying port listener containers +INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] +... +INFO[0101] Finished building Kubernetes cluster successfully +``` + +## 12. Back Up Auto-Generated Config File + +During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. + +## What's Next? 
+ +You have a couple of options: + +- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restoration]({{}}/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration). +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). + +
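Before moving on to either option, you may want to confirm that the cluster RKE just built responds to the kubeconfig you backed up in the previous step. A minimal sketch, assuming `kubectl` is installed on your workstation and the command is run from the directory containing the generated file:

```
# Optional sanity check using the auto-generated kubeconfig from step 12.
# All three nodes should be listed with a Ready status.
kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes
```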
+ +## FAQ and Troubleshooting + +{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb/_index.md similarity index 84% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb/_index.md index 9ba02bc211a..1e6bdcffe44 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb/_index.md @@ -7,9 +7,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. ## Objectives @@ -36,7 +36,7 @@ Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get st The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -![EC2 Load Balancing section]({{< baseurl >}}/img/rancher/ha/nlb/ec2-loadbalancing.png) +{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} Click **Create target group** to create the first target group, regarding TCP port 443. @@ -62,11 +62,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 443 settings**
-![Target group 443]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}}
**Screenshot Target group TCP port 443 Advanced settings**
-![Target group 443 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}}
@@ -94,11 +94,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 80 settings**
-![Target group 80]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}}
**Screenshot Target group TCP port 80 Advanced settings**
-![Target group 80 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}}
@@ -108,19 +108,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -![Edit target group 443]({{< baseurl >}}/img/rancher/ha/nlb/edit-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} Select the instances (Linux nodes) you want to add, and click **Add to registered**.
**Screenshot Add targets to target group TCP port 443**
-![Add targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/add-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}}
**Screenshot Added targets to target group TCP port 443**
-![Added targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/added-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} When the instances are added, click **Save** on the bottom right of the screen. diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/_index.md new file mode 100644 index 00000000000..379ccf801b5 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/_index.md @@ -0,0 +1,288 @@ +--- +title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) +weight: 276 +aliases: +- /rancher/v2.x/en/installation/ha-server-install-external-lb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: + +- Layer 7 Loadbalancer with SSL termination (HTTPS) +- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) + +In an Kubernetes setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. + +Kubernetes Rancher install with layer 7 load balancer, depicting SSL termination at load balancer +![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) + +## Installation Outline + +Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. + + + +- [1. Provision Linux Hosts](#1-provision-linux-hosts) +- [2. Configure Load Balancer](#2-configure-load-balancer) +- [3. Configure DNS](#3-configure-dns) +- [4. Install RKE](#4-install-rke) +- [5. Download RKE Config File Template](#5-download-rke-config-file-template) +- [6. Configure Nodes](#6-configure-nodes) +- [7. Configure Certificates](#7-configure-certificates) +- [8. Configure FQDN](#8-configure-fqdn) +- [9. Configure Rancher version](#9-configure-rancher-version) +- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) +- [11. Run RKE](#11-run-rke) +- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) + + + +## 1. Provision Linux Hosts + +Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). + +## 2. Configure Load Balancer + +When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. 
+ +The load balancer has to be configured to support the following: + +* **WebSocket** connections +* **SPDY** / **HTTP/2** protocols +* Passing / setting the following headers: + +| Header | Value | Description | +|---------------------|----------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. | +| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. | +| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the port that a client used to connect to the load balancer. | +| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. | + +Health checks can be executed on the `/healthz` endpoint of the node; this returns HTTP 200. + +We have example configurations for the following load balancers: + +* [Amazon ALB configuration](alb/) +* [NGINX configuration](nginx/) + +## 3. Configure DNS + +Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).<br/>

+ +1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). + +2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: + + `nslookup HOSTNAME.DOMAIN.COM` + + **Step Result:** Terminal displays output similar to the following: + + ``` + $ nslookup rancher.yourdomain.com + Server: YOUR_HOSTNAME_IP_ADDRESS + Address: YOUR_HOSTNAME_IP_ADDRESS#53 + + Non-authoritative answer: + Name: rancher.yourdomain.com + Address: HOSTNAME.DOMAIN.COM + ``` + +<br/>
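Because the load balancer from the previous step should already be listening, you can also check that the new name reaches it on port 443. This is a sketch only, assuming a netcat build that supports `-z` and using a placeholder FQDN:

```
# Assumes netcat (nc) with -z support is installed; rancher.yourdomain.com is a placeholder.
# A successful connection confirms DNS resolution and that the load balancer accepts TCP 443.
nc -zv rancher.yourdomain.com 443
```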
+ +## 4. Install RKE + +RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to set up our cluster and run Rancher. + +1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. + +2. Confirm that RKE is now executable by running the following command: + + ``` + rke --version + ``` + +## 5. Download RKE Config File Template + +RKE uses a YAML config file to install and configure your Kubernetes cluster. There are two templates to choose from, depending on the SSL certificate you want to use. + +1. Download one of the following templates, depending on the SSL certificate you're using. + + - [Template for self-signed certificate<br/>
`3-node-externalssl-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-certificate.yml) + - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-recognizedca.yml) + + >**Advanced Config Options:** + > + >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.x/en/installation/options/helm2/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). + + +2. Rename the file to `rancher-cluster.yml`. + +## 6. Configure Nodes + +Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. + +1. Open `rancher-cluster.yml` in your favorite text editor. + +1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). + + For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docket socket, you can test this by logging in with the specified user and run `docker ps`. + + >**Note:** + > + >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. + + nodes: + # The IP address or hostname of the node + - address: IP_ADDRESS_1 + # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) + # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 + user: USER + role: [controlplane,etcd,worker] + # Path the SSH key that can be used to access to node with the specified user + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_2 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_3 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + +1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. + + services: + etcd: + backup: false + +## 7. Configure Certificates + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +Choose from the following options: + +{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} +>**Prerequisites:** +>Create a self-signed certificate. +> +>- The certificate files must be in [PEM format](#pem). +>- The certificate files must be encoded in [base64](#base64). +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) + +>**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
+ +After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + +{{% /accordion %}} +{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} +If you are using a Certificate Signed By A Recognized Certificate Authority, you don't need to perform any step in this part. +{{% /accordion %}} + +## 8. Configure FQDN + +There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). + +1. Open `rancher-cluster.yml`. + +2. In the `kind: Ingress` with `name: cattle-ingress-http:` + + Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). + + **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + ``` + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: rancher.yourdomain.com + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + ``` + + +3. Save the file and close it. + +## 9. Configure Rancher version + +The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. + +``` + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:v2.0.6 + imagePullPolicy: Always +``` + +## 10. 
Back Up Your RKE Config File + +After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. + +## 11. Run RKE + +With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. + +1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. + +2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. + +3. Enter the `rke up` command listed below. + + ``` + rke up --config rancher-cluster.yml + ``` + + **Step Result:** The output should be similar to the snippet below: + + ``` + INFO[0000] Building Kubernetes cluster + INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] + INFO[0000] [network] Deploying port listener containers + INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] + ... + INFO[0101] Finished building Kubernetes cluster successfully + ``` + +## 12. Back Up Auto-Generated Config File + +During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. + +## What's Next? + +- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration]({{}}/rancher/v2.x/en/backups/backups/ha-backups/) to learn how to back up your Rancher Server in case of a disaster scenario. +- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.x/en/tasks/clusters/creating-a-cluster/). + +<br/>
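Whichever option you pick, it can be reassuring to first confirm that the Rancher pods started. A minimal sketch, assuming `kubectl` is installed and run from the directory containing the auto-generated kubeconfig from step 12:

```
# Optional check that the Rancher deployment is running in the cattle-system namespace.
kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -n cattle-system
```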
+ +## FAQ and Troubleshooting + +{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/alb/_index.md similarity index 89% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/alb/_index.md index 229cfa632d0..3741167921f 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/alb/_index.md @@ -7,9 +7,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher helm chart to install Kubernetes Rancher. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. ## Objectives diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx/_index.md new file mode 100644 index 00000000000..00ed78da136 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx/_index.md @@ -0,0 +1,40 @@ +--- +title: NGINX Configuration +weight: 277 +aliases: +- /rancher/v2.x/en/installation/ha-server-install-external-lb/nginx/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +## Install NGINX + +Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. + +For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +## Create NGINX Configuration + +See [Example NGINX config]({{}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/chart-options/#example-nginx-config). + +## Run NGINX + +* Reload or restart NGINX + + ```` + # Reload NGINX + nginx -s reload + + # Restart NGINX + # Depending on your Linux distribution + service nginx restart + systemctl restart nginx + ```` + +## Browse to Rancher UI + +You should now be to able to browse to `https://FQDN`. 
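If you prefer to verify from the command line before opening the browser, a simple HTTPS probe against the FQDN can confirm that NGINX is proxying requests. This is only a sketch; the FQDN is a placeholder and `-k` skips certificate verification, which is useful with self-signed certificates:

```
# Quick reachability check; replace the FQDN with your own.
curl -kI https://rancher.yourdomain.com
```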
diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/proxy/_index.md similarity index 73% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md rename to content/rancher/v2.x/en/installation/options/helm2/rke-add-on/proxy/_index.md index 9a0e7edb6e4..5e7eb1f4a80 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/proxy/_index.md @@ -5,9 +5,9 @@ weight: 277 > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. @@ -21,12 +21,12 @@ NO_PROXY | Network address(es), network address range(s) and do > **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. -## High Availability Installation +## Kubernetes installation -When using High Availability Installation, the environment variables need to be added to the RKE Config File template. +When using Kubernetes installation, the environment variables need to be added to the RKE Config File template. -* [High Availability Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install/#5-download-rke-config-file-template) -* [High Availability Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install-external-lb/#5-download-rke-config-file-template) +* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template]({{}}/rancher/v2.x/en/installation/k8s-install-server-install/#5-download-rke-config-file-template) +* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template]({{}}/rancher/v2.x/en/installation/k8s-install-server-install-external-lb/#5-download-rke-config-file-template) The environment variables should be defined in the `Deployment` inside the RKE Config File Template. You only have to add the part starting with `env:` to (but not including) `ports:`. Make sure the indentation is identical to the preceding `name:`. 
Required values for `NO_PROXY` are: diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md new file mode 100644 index 00000000000..4571ade2775 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md @@ -0,0 +1,48 @@ +--- +title: 404 - default backend +weight: 30 +aliases: +- /rancher/v2.x/en/installation/troubleshooting-ha/404-default-backend/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. + +When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. + +### Possible causes + +The nginx ingress controller is not able to serve the configured host in `rancher-cluster.yml`. This should be the FQDN you configured to access Rancher. You can check if it is properly configured by viewing the ingress that is created by running the following command: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress -n cattle-system -o wide +``` + +Check if the `HOSTS` column is displaying the FQDN you configured in the template, and that the used nodes are listed in the `ADDRESS` column. If that is configured correctly, we can check the logging of the nginx ingress controller. + +The logging of the nginx ingress controller will show why it cannot serve the requested host. To view the logs, you can run the following command + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx +``` + +Errors + +* `x509: certificate is valid for fqdn, not your_configured_fqdn` + +The used certificates do not contain the correct hostname. Generate new certificates that contain the chosen FQDN to access Rancher and redeploy. + +* `Port 80 is already in use. Please check the flag --http-port` + +There is a process on the node occupying port 80, this port is needed for the nginx ingress controller to route requests to Rancher. You can find the process by running the command: `netstat -plant | grep \:80`. + +Stop/kill the process and redeploy. + +* `unexpected error creating pem file: no valid PEM formatted block found` + +The base64 encoded string configured in the template is not valid. Please check if you can decode the configured string using `base64 -D STRING`, this should return the same output as the content of the file you used to generate the string. If this is correct, please check if the base64 encoded string is placed directly after the key, without any newlines before, in between or after. 
(For example: `tls.crt: LS01..`) diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/_index.md new file mode 100644 index 00000000000..45201a0dc19 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/_index.md @@ -0,0 +1,32 @@ +--- +title: Troubleshooting HA RKE Add-On Install +weight: 370 +aliases: +- /rancher/v2.x/en/installation/troubleshooting-ha/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +This section contains common errors seen when setting up a Kubernetes installation. + +Choose from the following options: + +- [Generic troubleshooting](generic-troubleshooting/) + + In this section, you can find generic ways to debug your Kubernetes cluster. + +- [Failed to set up SSH tunneling for host]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) + + In this section, you can find errors related to SSH tunneling when you run the `rke` command to setup your nodes. + +- [Failed to get job complete status](job-complete-status/) + + In this section, you can find errors related to deploying addons. + +- [404 - default backend](404-default-backend/) + + In this section, you can find errors related to the `404 - default backend` page that is shown when trying to access Rancher. diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md new file mode 100644 index 00000000000..bffba0352bd --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md @@ -0,0 +1,161 @@ +--- +title: Generic troubleshooting +weight: 5 +aliases: +- /rancher/v2.x/en/installation/troubleshooting-ha/generic-troubleshooting/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +Below are steps that you can follow to determine what is wrong in your cluster. + +### Double check if all the required ports are opened in your (host) firewall + +Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) are opened in your (host) firewall. 
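A quick way to spot-check connectivity between two nodes is a TCP probe against a few of those ports. The ports below are only examples taken from common Kubernetes defaults, not the complete list; consult the required ports page for everything that must be open, and replace `NODE_IP` with the address of another node:

```
# Illustrative spot checks only; run from one cluster node against another node's IP.
# Assumes a netcat (nc) build that supports -z.
nc -zv NODE_IP 6443    # kube-apiserver
nc -zv NODE_IP 2379    # etcd client traffic
nc -zv NODE_IP 10250   # kubelet
```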
+ +### All nodes should be present and in **Ready** state + +To check, run the command: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes +``` + +If a node is not shown in this output or a node is not in **Ready** state, you can check the logging of the `kubelet` container. Login to the node and run `docker logs kubelet`. + +### All pods/jobs should be in **Running**/**Completed** state + +To check, run the command: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get pods --all-namespaces +``` + +If a pod is not in **Running** state, you can dig into the root cause by running: + +#### Describe pod + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml describe pod POD_NAME -n NAMESPACE +``` + +#### Pod container logs + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs POD_NAME -n NAMESPACE +``` + +If a job is not in **Completed** state, you can dig into the root cause by running: + +#### Describe job + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml describe job JOB_NAME -n NAMESPACE +``` + +#### Logs from the containers of pods of the job + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=JOB_NAME -n NAMESPACE +``` + +### Check ingress + +Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (address(es) it will be routed to). + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress --all-namespaces +``` + +### List all Kubernetes cluster events + +Kubernetes cluster events are stored, and can be retrieved by running: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get events --all-namespaces +``` + +### Check Rancher container logging + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=cattle -n cattle-system +``` + +### Check NGINX ingress controller logging + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx +``` + +### Check if overlay network is functioning correctly + +The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. + +To test the overlay network, you can launch the following `DaemonSet` definition. This will run an `alpine` container on every host, which we will use to run a `ping` test between containers on all hosts. + +1. Save the following file as `ds-alpine.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: alpine + spec: + selector: + matchLabels: + name: alpine + template: + metadata: + labels: + name: alpine + spec: + tolerations: + - effect: NoExecute + key: "node-role.kubernetes.io/etcd" + value: "true" + - effect: NoSchedule + key: "node-role.kubernetes.io/controlplane" + value: "true" + containers: + - image: alpine + imagePullPolicy: Always + name: alpine + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + ``` + +2. Launch it using `kubectl --kubeconfig kube_config_rancher-cluster.yml create -f ds-alpine.yml` +3. Wait until `kubectl --kubeconfig kube_config_rancher-cluster.yml rollout status ds/alpine -w` returns: `daemon set "alpine" successfully rolled out`. +4. 
Run the following command to let each container on every host ping each other (it's a single line command). + + ``` + echo "=> Start"; kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --kubeconfig kube_config_rancher-cluster.yml --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End" + ``` + +5. When this command has finished running, the output indicating everything is correct is: + + ``` + => Start + => End + ``` + +If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for overlay networking are not opened between the hosts indicated. + +Example error output of a situation where NODE1 had the UDP ports blocked. + +``` +=> Start +command terminated with exit code 1 +NODE2 cannot reach NODE1 +command terminated with exit code 1 +NODE3 cannot reach NODE1 +command terminated with exit code 1 +NODE1 cannot reach NODE2 +command terminated with exit code 1 +NODE1 cannot reach NODE3 +=> End +``` diff --git a/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md new file mode 100644 index 00000000000..ab746496d68 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md @@ -0,0 +1,62 @@ +--- +title: Failed to get job complete status +weight: 20 +aliases: +- /rancher/v2.x/en/installation/troubleshooting-ha/job-complete-status/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/options/helm2/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. + +When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. + +### Failed to deploy addon execute job [rke-user-includes-addons]: Failed to get job complete status + +Something is wrong in the addons definitions, you can run the following command to get the root cause in the logging of the job: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=rke-user-addon-deploy-job -n kube-system +``` + +#### error: error converting YAML to JSON: yaml: line 9: + +The structure of the addons definition in `rancher-cluster.yml` is wrong. 
In the different resources specified in the addons section, there is a error in the structure of the YAML. The pointer `yaml line 9` references to the line number of the addon that is causing issues. + +Things to check +
- Is each base64-encoded certificate string placed directly after its key (for example: `tls.crt: LS01...`)? There should be no newline or space before, within, or after the string.
- Is the YAML properly formatted? Each indentation level should be 2 spaces, as shown in the template files.
- Verify the integrity of your certificate by running `cat MyCertificate | base64 -d` on Linux or `cat MyCertificate | base64 -D` on macOS. If the string is not valid base64, the command output will tell you.
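If the string decodes without errors, you can additionally confirm that the decoded data is really the certificate you expect. A small sketch, assuming `MyCertificate` holds the base64 string used in the addon:

```
# Decode the base64 string and print the certificate subject and validity dates
cat MyCertificate | base64 -d | openssl x509 -noout -subject -dates
```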
+ +#### Error from server (BadRequest): error when creating "/etc/config/rke-user-addon.yaml": Secret in version "v1" cannot be handled as a Secret + +The base64 string of one of the certificate strings is wrong. The log message will try to show you what part of the string is not recognized as valid base64. + +Things to check +
- Check if the base64 string is valid by running one of the commands below:

```
# macOS
echo BASE64_CRT | base64 -D
# Linux
echo BASE64_CRT | base64 -d
# Windows
certutil -decode FILENAME.base64 FILENAME.verify
```
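Once the certificate strings are fixed in `rancher-cluster.yml`, the configuration has to be applied again. A minimal sketch, using the clean-and-reapply approach mentioned at the top of this page (the config file name is the one used throughout these docs):

```
# Clean the nodes so the previous, broken addon configuration cannot conflict
rke remove --config rancher-cluster.yml

# Apply the corrected configuration
rke up --config rancher-cluster.yml
```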
+ +#### The Ingress "cattle-ingress-http" is invalid: spec.rules[0].host: Invalid value: "IP": must be a DNS name, not an IP address + +The host value can only contain a host name, as it is needed by the ingress controller to match the hostname and pass to the correct backend. diff --git a/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md b/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md new file mode 100644 index 00000000000..b2b84f724f3 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md @@ -0,0 +1,68 @@ +--- +title: Setting up Local System Charts for Air Gapped Installations +weight: 1120 +aliases: + - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md +--- + +The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. + +In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag in Rancher v2.3.0, and using a Git mirror for Rancher versions prior to v2.3.0. + +# Using Local System Charts in Rancher v2.3.0 + +In Rancher v2.3.0, a local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. + +Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation]({{}}/rancher/v2.x/en/installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/#c-install-rancher) instructions. + +# Setting Up System Charts for Rancher Prior to v2.3.0 + +### A. Prepare System Charts + +The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. + +Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. + +### B. Configure System Charts + +Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. + +{{% tabs %}} +{{% tab "Rancher UI" %}} + +In the catalog management page in the Rancher UI, follow these steps: + +1. Go to the **Global** view. + +1. Click **Tools > Catalogs.** + +1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **Ellipsis (...) > Edit.** + +1. In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. + +1. 
Click **Save.** + +**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. + +{{% /tab %}} +{{% tab "Rancher API" %}} + +1. Log into Rancher. + +1. Open `https:///v3/catalogs/system-library` in your browser. + + {{< img "/img/rancher/airgap/system-charts-setting.png" "Open">}} + +1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. + + {{< img "/img/rancher/airgap/system-charts-update.png" "Update">}} + +1. Click **Show Request** + +1. Click **Send Request** + +**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/options/rke-add-on/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/_index.md new file mode 100644 index 00000000000..4904cb0edf6 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/_index.md @@ -0,0 +1,15 @@ +--- +title: RKE Add-On Install +weight: 276 +--- + +> **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +> +> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +- [Kubernetes Installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb) +- [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb) +- [HTTP Proxy Configuration for a Kubernetes installation]({{}}/rancher/v2.x/en/installation/options/rke-add-on/proxy/) +- [Troubleshooting RKE Add-on Installs]({{}}/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/) diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/api-auditing/_index.md similarity index 80% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/api-auditing/_index.md index 36dc7198bf2..914f283c582 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/api-auditing/_index.md @@ -7,9 +7,9 @@ aliases: >**Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. You can know what happened, when it happened, who initiated it, and what cluster it affected. API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. @@ -23,7 +23,7 @@ To enable API auditing: - Declare a `mountPath` in the `volumeMounts` directive of the container. - Declare a `path` in the `volumes` directive. -For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing). +For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing). ```yaml ... diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/_index.md similarity index 93% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/_index.md index 70a5558a7d3..d2aac77497f 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/_index.md @@ -1,15 +1,15 @@ --- -title: HA Install with External Load Balancer (TCP/Layer 4) +title: Kubernetes Install with External Load Balancer (TCP/Layer 4) weight: 275 aliases: -- /rancher/v2.x/en/installation/ha-server-install/ +- /rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: @@ -18,8 +18,8 @@ This procedure walks you through setting up a 3-node cluster using the Rancher K In an HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. 
Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. -HA Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers -![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) +Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers +![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) ## Installation Outline @@ -46,11 +46,11 @@ Installation of Rancher in a high-availability configuration involves multiple p ## 1. Provision Linux Hosts -Provision three Linux hosts according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). +Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). ## 2. Configure Load Balancer -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install/nlb/) +We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration]({{}}/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/nlb/) >**Note:** > In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. @@ -150,7 +150,7 @@ Choose a fully qualified domain name (FQDN) that you want to use to access Ranch RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. -1. Follow the [RKE Install]({{< baseurl >}}/rke/latest/en/installation) instructions. +1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. 2. Confirm that RKE is now executable by running the following command: @@ -169,8 +169,8 @@ RKE uses a `.yml` config file to install and configure your Kubernetes cluster. >**Advanced Config Options:** > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{< baseurl >}}/rke/latest/en/config-options/). + >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.x/en/installation/k8s-install/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). 2. Rename the file to `rancher-cluster.yml`. @@ -186,7 +186,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. 
The specified user should be able to access the Docket socket, you can test this by logging in with the specified user and run `docker ps`. >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{< baseurl >}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. + > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. nodes: # The IP address or hostname of the node @@ -210,7 +210,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec services: etcd: - backup: false + backup: false ## 7. Configure Certificates @@ -392,8 +392,8 @@ During installation, RKE automatically generates a config file named `kube_confi You have a couple of options: -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availablility Back Up and Restoration]({{< baseurl >}}/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/). +- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restoration]({{}}/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration). +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/).
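A quick way to confirm the install is healthy before moving on is to query the cluster with the kubeconfig that RKE generated. A minimal check, using the file and namespace names used elsewhere in these docs:

```
# All nodes should be in Ready state
kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes

# The Rancher (cattle) pods should be Running
kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -n cattle-system
```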
diff --git a/content/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/nlb/_index.md new file mode 100644 index 00000000000..fc41dcad175 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/nlb/_index.md @@ -0,0 +1,182 @@ +--- +title: Amazon NLB Configuration +weight: 277 +aliases: +- /rancher/v2.x/en/installation/ha-server-install/nlb/ +- /rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +## Objectives + +Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. + +1. [Create Target Groups](#create-target-groups) + + Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +2. [Register Targets](#register-targets) + + Add your Linux nodes to the target groups. + +3. [Create Your NLB](#create-your-nlb) + + Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. + + +## Create Target Groups + +Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX controller on the nodes will make sure that port 80 gets redirected to port 443. + +Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. + +The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. + +{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} + +Click **Create target group** to create the first target group, regarding TCP port 443. + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. + +Option | Setting +--------------------------------------|------------------------------------ +Target Group Name | `rancher-tcp-443` +Protocol | `TCP` +Port | `443` +Target type | `instance` +VPC | Choose your VPC +Protocol
(Health Check) | `HTTP`
Path (Health Check) | `/healthz`
Port (Advanced health check) | `override`, `80`
Healthy threshold (Advanced health check) | `3`
Unhealthy threshold (Advanced health check) | `3`
Timeout (Advanced health check) | `6 seconds`
Interval (Advanced health check) | `10 seconds`
Success codes | `200-399`

+**Screenshot Target group TCP port 443 settings**
+{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} + +
+**Screenshot Target group TCP port 443 Advanced settings**
+{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} + +
+ +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. + +Option | Setting +--------------------------------------|------------------------------------ +Target Group Name | `rancher-tcp-80` +Protocol | `TCP` +Port | `80` +Target type | `instance` +VPC | Choose your VPC +Protocol
(Health Check) | `HTTP`
Path (Health Check) | `/healthz`
Port (Advanced health check) | `traffic port`
Healthy threshold (Advanced health check) | `3`
Unhealthy threshold (Advanced health check) | `3`
Timeout (Advanced health check) | `6 seconds`
Interval (Advanced health check) | `10 seconds`
Success codes | `200-399`

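If you would rather create these target groups from a script than click through the console, roughly equivalent AWS CLI calls are sketched below. This is only an illustration, not part of the official procedure; the VPC ID is a placeholder, and the threshold, interval, and success-code settings from the tables above can be adjusted in the console afterwards.

```
aws elbv2 create-target-group --name rancher-tcp-443 --protocol TCP --port 443 \
  --target-type instance --vpc-id vpc-xxxxxxxx \
  --health-check-protocol HTTP --health-check-path /healthz --health-check-port 80

aws elbv2 create-target-group --name rancher-tcp-80 --protocol TCP --port 80 \
  --target-type instance --vpc-id vpc-xxxxxxxx \
  --health-check-protocol HTTP --health-check-path /healthz --health-check-port traffic-port
```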
+**Screenshot Target group TCP port 80 settings**
+{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} + +
+**Screenshot Target group TCP port 80 Advanced settings**
+{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} + +
+ +## Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +
+**Screenshot Add targets to target group TCP port 443**
+ +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} + +
+**Screenshot Added targets to target group TCP port 443**
+ +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +## Create Your NLB + +Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. + +5. Complete the **Step 1: Configure Load Balancer** form. + - **Basic Configuration** + + - Name: `rancher` + - Scheme: `internet-facing` + - **Listeners** + + Add the **Load Balancer Protocols** and **Load Balancer Ports** below. + - `TCP`: `443` + + - **Availability Zones** + + - Select Your **VPC** and **Availability Zones**. + +6. Complete the **Step 2: Configure Routing** form. + + - From the **Target Group** drop-down, choose **Existing target group**. + + - From the **Name** drop-down, choose `rancher-tcp-443`. + + - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. + +8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. + +9. After AWS creates the NLB, click **Close**. + +## Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to...** + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/_index.md similarity index 88% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/_index.md index 8460a64160e..0a99c09559c 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/_index.md @@ -1,25 +1,25 @@ --- -title: HA Install with External Load Balancer (HTTPS/Layer 7) +title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) weight: 276 aliases: -- /rancher/v2.x/en/installation/ha-server-install-external-lb/ +- /rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb --- > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). 
> ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: -- Layer 7 Loadbalancer with SSL termination (HTTPS) +- Layer 7 load balancer with SSL termination (HTTPS) - [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) In an HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. -HA Rancher install with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha-l7.svg) +Rancher installed on a Kubernetes cluster with layer 7 load balancer, depicting SSL termination at load balancer +![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) ## Installation Outline @@ -44,7 +44,7 @@ Installation of Rancher in a high-availability configuration involves multiple p ## 1. Provision Linux Hosts -Provision three Linux hosts according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). +Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements). ## 2. Configure Load Balancer @@ -98,7 +98,7 @@ Choose a fully qualified domain name (FQDN) that you want to use to access Ranch RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. -1. Follow the [RKE Install]({{< baseurl >}}/rke/latest/en/installation) instructions. +1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. 2. Confirm that RKE is now executable by running the following command: @@ -117,8 +117,8 @@ RKE uses a YAML config file to install and configure your Kubernetes cluster. Th >**Advanced Config Options:** > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{< baseurl >}}/rke/latest/en/config-options/). + >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.x/en/installation/k8s-install/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). 2. 
Rename the file to `rancher-cluster.yml`. @@ -134,8 +134,8 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docket socket, you can test this by logging in with the specified user and run `docker ps`. >**Note:** - > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{< baseurl >}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. + > + >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. nodes: # The IP address or hostname of the node @@ -159,7 +159,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec services: etcd: - backup: false + backup: false ## 7. Configure Certificates @@ -278,8 +278,8 @@ During installation, RKE automatically generates a config file named `kube_confi ## What's Next? -- **Recommended:** Review [Creating Backups—High Availablility Back Up and Restoration]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{< baseurl >}}/rancher/v2.x/en/tasks/clusters/creating-a-cluster/). +- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration]({{}}/rancher/v2.x/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. +- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.x/en/tasks/clusters/creating-a-cluster/).
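As a final smoke test, you can verify that the load balancer answers on the FQDN you configured. A rough sketch, where `FQDN` is a placeholder and `-k` is only needed while the certificate chain is not yet trusted by your workstation:

```
# Print the HTTP status code returned for the Rancher hostname
curl -k -s -o /dev/null -w "%{http_code}\n" https://FQDN
```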
diff --git a/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/alb/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/alb/_index.md new file mode 100644 index 00000000000..760f25be970 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/alb/_index.md @@ -0,0 +1,104 @@ +--- +title: Amazon ALB Configuration +weight: 277 +aliases: +- /rancher/v2.x/en/installation/ha-server-install-external-lb/alb/ +- /rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +## Objectives + +Configuring an Amazon ALB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. + +1. [Create Target Group](#create-target-group) + + Begin by creating one target group for the http protocol. You'll add your Linux nodes to this group. + +2. [Register Targets](#register-targets) + + Add your Linux nodes to the target group. + +3. [Create Your ALB](#create-your-alb) + + Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. + + +## Create Target Group + +Your first ALB configuration step is to create one target group for HTTP. + +Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. + +The document below will guide you through this process. Use the data in the tables below to complete the procedure. + +[Amazon Documentation: Create a Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-target-group.html) + +### Target Group (HTTP) + +Option | Setting +----------------------------|------------------------------------ +Target Group Name | `rancher-http-80` +Protocol | `HTTP` +Port | `80` +Target type | `instance` +VPC | Choose your VPC +Protocol
(Health Check) | `HTTP`
Path
(Health Check) | `/healthz` + +## Register Targets + +Next, add your Linux nodes to your target group. + +[Amazon Documentation: Register Targets with Your Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-register-targets.html) + +### Create Your ALB + +Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target group you created in [Create Target Group](#create-target-group). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Application Load Balancer**. + +5. Complete the **Step 1: Configure Load Balancer** form. + - **Basic Configuration** + + - Name: `rancher-http` + - Scheme: `internet-facing` + - IP address type: `ipv4` + - **Listeners** + + Add the **Load Balancer Protocols** and **Load Balancer Ports** below. + - `HTTP`: `80` + - `HTTPS`: `443` + + - **Availability Zones** + + - Select Your **VPC** and **Availability Zones**. + +6. Complete the **Step 2: Configure Security Settings** form. + + Configure the certificate you want to use for SSL termination. + +7. Complete the **Step 3: Configure Security Groups** form. + +8. Complete the **Step 4: Configure Routing** form. + + - From the **Target Group** drop-down, choose **Existing target group**. + + - Add target group `rancher-http-80`. + +9. Complete **Step 5: Register Targets**. Since you registered your targets earlier, all you have to do it click **Next: Review**. + +10. Complete **Step 6: Review**. Look over the load balancer details and click **Create** when you're satisfied. + +11. After AWS creates the ALB, click **Close**. diff --git a/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/nginx/_index.md new file mode 100644 index 00000000000..c2b9f8fe1ad --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/nginx/_index.md @@ -0,0 +1,41 @@ +--- +title: NGINX Configuration +weight: 277 +aliases: +- /rancher/v2.x/en/installation/ha-server-install-external-lb/nginx/ +- /rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +## Install NGINX + +Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. + +For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +## Create NGINX Configuration + +See [Example NGINX config]({{}}/rancher/v2.x/en/installation/options/chart-options/#example-nginx-config). 
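Before reloading NGINX with a new configuration, it's worth validating the file first. A standard check, not specific to this guide:

```
# Test the configuration for syntax errors before reloading or restarting
nginx -t
```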
+ +## Run NGINX + +* Reload or restart NGINX + + ```` + # Reload NGINX + nginx -s reload + + # Restart NGINX + # Depending on your Linux distribution + service nginx restart + systemctl restart nginx + ```` + +## Browse to Rancher UI + +You should now be to able to browse to `https://FQDN`. diff --git a/content/rancher/v2.x/en/installation/options/rke-add-on/proxy/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/proxy/_index.md new file mode 100644 index 00000000000..4345e9cb121 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/proxy/_index.md @@ -0,0 +1,71 @@ +--- +title: HTTP Proxy Configuration +weight: 277 +aliases: + - /rancher/v2.x/en/installation/ha/rke-add-on/proxy +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. + +Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. + +Environment variable | Purpose +--------------------------|--------- +HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) +HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) +NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) + +> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. + +## Installing Rancher on a Kubernetes Cluster + +When using Kubernetes installation, the environment variables need to be added to the RKE Config File template. + +* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template]({{}}/rancher/v2.x/en/installation/ha-server-install/#5-download-rke-config-file-template) +* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template]({{}}/rancher/v2.x/en/installation/ha-server-install-external-lb/#5-download-rke-config-file-template) + +The environment variables should be defined in the `Deployment` inside the RKE Config File Template. You only have to add the part starting with `env:` to (but not including) `ports:`. Make sure the indentation is identical to the preceding `name:`. Required values for `NO_PROXY` are: + +* `localhost` +* `127.0.0.1` +* `0.0.0.0` +* Configured `service_cluster_ip_range` (default: `10.43.0.0/16`) + +The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage of the proxy when accessing network range `192.168.10.0/24`, the configured `service_cluster_ip_range` (`10.43.0.0/16`) and every hostname under the domain `example.com`. If you have changed the `service_cluster_ip_range`, you have to update the value below accordingly. + +```yaml +... 
+--- + kind: Deployment + apiVersion: extensions/v1beta1 + metadata: + namespace: cattle-system + name: cattle + spec: + replicas: 1 + template: + metadata: + labels: + app: cattle + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:latest + imagePullPolicy: Always + name: cattle-server + env: + - name: HTTP_PROXY + value: "http://192.168.10.1:3128" + - name: HTTPS_PROXY + value: "http://192.168.10.1:3128" + - name: NO_PROXY + value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,192.168.10.0/24,example.com" + ports: +... +``` diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/404-default-backend/_index.md similarity index 86% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/404-default-backend/_index.md index 086744c0d50..0b036c0df28 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/404-default-backend/_index.md @@ -7,9 +7,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/_index.md similarity index 58% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/_index.md index 5aeeafc2224..e9362246aec 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/_index.md @@ -7,11 +7,11 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. 
For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. -This section contains common errors seen when setting up a High Availability Installation. +This section contains common errors seen when setting up a Kubernetes installation. Choose from the following options: @@ -19,7 +19,7 @@ Choose from the following options: In this section, you can find generic ways to debug your Kubernetes cluster. -- [Failed to set up SSH tunneling for host](ssh-tunneling/) +- [Failed to set up SSH tunneling for host]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) In this section, you can find errors related to SSH tunneling when you run the `rke` command to setup your nodes. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/generic-troubleshooting/_index.md similarity index 87% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/generic-troubleshooting/_index.md index 543664669d9..df8d4381f4a 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/generic-troubleshooting/_index.md @@ -7,15 +7,15 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. Below are steps that you can follow to determine what is wrong in your cluster. ### Double check if all the required ports are opened in your (host) firewall -Double check if all the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) are opened in your (host) firewall. +Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) are opened in your (host) firewall. 
### All nodes should be present and in **Ready** state @@ -143,7 +143,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition => End ``` -If you see error in the output, that means that the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for overlay networking are not opened between the hosts indicated. +If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for overlay networking are not opened between the hosts indicated. Example error output of a situation where NODE1 had the UDP ports blocked. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/job-complete-status/_index.md similarity index 87% rename from content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md rename to content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/job-complete-status/_index.md index ca20cded639..8fd5e32b41b 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md +++ b/content/rancher/v2.x/en/installation/options/rke-add-on/troubleshooting/job-complete-status/_index.md @@ -7,9 +7,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. diff --git a/content/rancher/v2.x/en/installation/options/server-tags/_index.md b/content/rancher/v2.x/en/installation/options/server-tags/_index.md new file mode 100644 index 00000000000..103b487d081 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/server-tags/_index.md @@ -0,0 +1,93 @@ +--- +title: Choosing a Rancher Version +weight: 230 +aliases: + - /rancher/v2.x/en/installation/server-tags +--- + +This section describes how to choose a Rancher version. + +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. 
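To see which Helm release is currently installed on the workstation you will run the install from, a quick check with the standard Helm CLI is shown below; compare the output against the version requirements linked above.

```
helm version --short
```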
+ +For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** + +{{% tabs %}} +{{% tab "Helm Charts" %}} + +When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/installation/k8s-install/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. + +Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + +### Helm Chart Repositories + +Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. + +| Type | Command to Add the Repo | Description of the Repo | +| -------------- | ------------ | ----------------- | +| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | +| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. | +| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. | + +
+Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). + +> **Note:** The introduction of the `rancher-latest` and `rancher-stable` Helm Chart repositories was introduced after Rancher v2.1.0, so the `rancher-stable` repository contains some Rancher versions that were never marked as `rancher/rancher:stable`. The versions of Rancher that were tagged as `rancher/rancher:stable` prior to v2.1.0 are v2.0.4, v2.0.6, v2.0.8. Post v2.1.0, all charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. + +### Helm Chart Versions + +Rancher Helm chart versions match the Rancher version (i.e `appVersion`). + +For the Rancher v2.1.x versions, there were some Helm charts, that were using a version that was a build number, i.e. `yyyy.mm.`. These charts have been replaced with the equivalent Rancher version and are no longer available. + +### Switching to a Different Helm Chart Repository + +After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. + +> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. + +{{< release-channel >}} + +1. List the current Helm chart repositories. + + ```plain + helm repo list + + NAME URL + stable https://kubernetes-charts.storage.googleapis.com + rancher- https://releases.rancher.com/server-charts/ + ``` + +2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. + + ```plain + helm repo remove rancher- + ``` + +3. Add the Helm chart repository that you want to start installing Rancher from. + + ```plain + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/) from the new Helm chart repository. +{{% /tab %}} +{{% tab "Docker Images" %}} +When performing [Docker installs]({{}}/rancher/v2.x/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. + +### Server Tags + +Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. + +| Tag | Description | +| -------------------------- | ------ | +| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | +| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | +| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | + +> **Notes:** +> +> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. +> - Want to install an alpha review for preview? 
Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/options/tls-secrets/_index.md similarity index 95% rename from content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md rename to content/rancher/v2.x/en/installation/options/tls-secrets/_index.md index 2866ca911e5..76262c0a12f 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md +++ b/content/rancher/v2.x/en/installation/options/tls-secrets/_index.md @@ -1,6 +1,8 @@ --- title: Adding TLS Secrets weight: 276 +aliases: +- /rancher/v2.x/en/installation/k8s-install/helm-rancher/tls-secrets/ --- Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. diff --git a/content/rancher/v2.x/en/installation/options/tls-settings/_index.md b/content/rancher/v2.x/en/installation/options/tls-settings/_index.md index 589af788f27..2cefd9fc853 100644 --- a/content/rancher/v2.x/en/installation/options/tls-settings/_index.md +++ b/content/rancher/v2.x/en/installation/options/tls-settings/_index.md @@ -1,8 +1,6 @@ --- title: TLS settings weight: 11000 -aliases: - - /rancher/v2.x/en/admin-settings/tls-settings/ --- _Available as of v2.1.7_ @@ -13,9 +11,9 @@ In Rancher v2.1.7, the default TLS configuration changed to only accept TLS 1.2 The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. -- [Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/#tls-settings) +- [Installing Rancher on a single node with Docker]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#tls-settings) -- [HA Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#tls-settings) +- [Installing Rancher on Kubernetes]({{}}/rancher/v2.x/en/installation/options/chart-options/#tls-settings) ## TLS settings diff --git a/content/rancher/v2.x/en/installation/options/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/options/troubleshooting/_index.md new file mode 100644 index 00000000000..556800e0432 --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/troubleshooting/_index.md @@ -0,0 +1,189 @@ +--- +title: Troubleshooting the Rancher Server Kubernetes Cluster +weight: 276 +aliases: + - /rancher/v2.x/en/installation/k8s-install/helm-rancher/troubleshooting/ + - /rancher/v2.x/en/installation/ha/kubernetes-rke/troubleshooting + - /rancher/v2.x/en/installation/k8s-install/kubernetes-rke/troubleshooting +--- + +This section describes how to troubleshoot an installation of Rancher on a Kubernetes cluster. + +### Relevant Namespaces + +Most of the troubleshooting will be done on objects in these 3 namespaces. + +- `cattle-system` - `rancher` deployment and pods. +- `ingress-nginx` - Ingress controller pods and services. +- `kube-system` - `tiller` and `cert-manager` pods. + +### "default backend - 404" + +A number of things can cause the ingress-controller not to forward traffic to your rancher instance. Most of the time its due to a bad ssl configuration. 
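+
+Before working through the checklist below, it can help to confirm that the relevant objects exist at all. A minimal sanity check (assuming the default namespaces listed above) might look like:
+
+```plain
+# The ingress controller pods should be Running in the ingress-nginx namespace
+kubectl -n ingress-nginx get pods
+
+# The Rancher ingress object should exist in the cattle-system namespace
+kubectl -n cattle-system get ingress
+```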
+
+Things to check:
+
+- [Check if Rancher is Running](#check-if-rancher-is-running)
+- [Cert CN is "Kubernetes Ingress Controller Fake Certificate"](#cert-cn-is-kubernetes-ingress-controller-fake-certificate)
+
+### Check if Rancher is Running
+
+Use `kubectl` to check the `cattle-system` namespace and see if the Rancher pods are in a Running state.
+
+```
+kubectl -n cattle-system get pods
+
+NAME                          READY   STATUS    RESTARTS   AGE
+pod/rancher-784d94f59b-vgqzh   1/1     Running   0          10m
+```
+
+If the state is not `Running`, run a `describe` on the pod and check the Events.
+
+```
+kubectl -n cattle-system describe pod
+
+...
+Events:
+  Type     Reason                 Age   From                Message
+  ----     ------                 ----  ----                -------
+  Normal   Scheduled              11m   default-scheduler   Successfully assigned rancher-784d94f59b-vgqzh to localhost
+  Normal   SuccessfulMountVolume  11m   kubelet, localhost  MountVolume.SetUp succeeded for volume "rancher-token-dj4mt"
+  Normal   Pulling                11m   kubelet, localhost  pulling image "rancher/rancher:v2.0.4"
+  Normal   Pulled                 11m   kubelet, localhost  Successfully pulled image "rancher/rancher:v2.0.4"
+  Normal   Created                11m   kubelet, localhost  Created container
+  Normal   Started                11m   kubelet, localhost  Started container
+```
+
+### Check the Rancher Logs
+
+Use `kubectl` to list the pods.
+
+```
+kubectl -n cattle-system get pods
+
+NAME                          READY   STATUS    RESTARTS   AGE
+pod/rancher-784d94f59b-vgqzh   1/1     Running   0          10m
+```
+
+Use `kubectl` and the pod name to list the logs from the pod.
+
+```
+kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh
+```
+
+### Cert CN is "Kubernetes Ingress Controller Fake Certificate"
+
+Use your browser to check the certificate details. If it says the Common Name is "Kubernetes Ingress Controller Fake Certificate", something may have gone wrong with reading or issuing your SSL cert.
+
+> **Note:** If you are using LetsEncrypt to issue certs, it can sometimes take a few minutes to issue the cert.
+
+### Checking for issues with cert-manager issued certs (Rancher Generated or LetsEncrypt)
+
+`cert-manager` has three parts.
+
+- `cert-manager` pod in the `kube-system` namespace.
+- `Issuer` object in the `cattle-system` namespace.
+- `Certificate` object in the `cattle-system` namespace.
+
+Work backwards: do a `kubectl describe` on each object and check the events to track down what might be missing.
+
+For example, here there is a problem with the Issuer:
+
+```
+kubectl -n cattle-system describe certificate
+...
+Events:
+  Type     Reason          Age                 From          Message
+  ----     ------          ----                ----          -------
+  Warning  IssuerNotReady  18s (x23 over 19m)  cert-manager  Issuer rancher not ready
+```
+
+```
+kubectl -n cattle-system describe issuer
+...
+Events:
+  Type     Reason         Age                 From          Message
+  ----     ------         ----                ----          -------
+  Warning  ErrInitIssuer  19m (x12 over 19m)  cert-manager  Error initializing issuer: secret "tls-rancher" not found
+  Warning  ErrGetKeyPair  9m (x16 over 19m)   cert-manager  Error getting keypair for CA issuer: secret "tls-rancher" not found
+```
+
+### Checking for Issues with Your Own SSL Certs
+
+Your certs get applied directly to the Ingress object in the `cattle-system` namespace.
+
+Check the status of the Ingress object and see if it's ready.
+
+```
+kubectl -n cattle-system describe ingress
+```
+
+If it's ready and the SSL is still not working, you may have a malformed cert or secret.
+
+Check the nginx-ingress-controller logs. Because the nginx-ingress-controller has multiple containers in its pod, you will need to specify the name of the container.
+
+```
+kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-controller
+...
+W0705 23:04:58.240571       7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found
+```
+
+### No matches for kind "Issuer"
+
+The [SSL configuration]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#choose-your-ssl-configuration) option you have chosen requires [cert-manager]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#optional-install-cert-manager) to be installed before installing Rancher, or else the following error is shown:
+
+```
+Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1"
+```
+
+Install [cert-manager]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#optional-install-cert-manager) and try installing Rancher again.
+
+
+### Canal Pods show READY 2/3
+
+The most common cause of this issue is that port 8472/UDP is not open between the nodes. Check your local firewall, network routing, or security groups.
+
+Once the network issue is resolved, the `canal` pods should time out and restart to establish their connections.
+
+### nginx-ingress-controller Pods show RESTARTS
+
+The most common cause of this issue is that the `canal` pods have failed to establish the overlay network. See [canal Pods show READY `2/3`](#canal-pods-show-ready-2-3) for troubleshooting.
+
+
+### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed)
+
+Some causes of this error include:
+
+* The user specified to connect with does not have permission to access the Docker socket. This can be checked by logging into the host and running the command `docker ps`:
+
+```
+$ ssh user@server
+user@server$ docker ps
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+```
+
+See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) for how to set this up properly.
+
+* When using RedHat/CentOS as the operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) for how to set this up properly.
+
+* The SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat:
+```
+$ nc xxx.xxx.xxx.xxx 22
+SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10
+```
+
+### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found
+
+The key file specified as `ssh_key_path` cannot be accessed. Make sure that you specified the private key file (not the public key, `.pub`), and that the user that is running the `rke` command can access the private key file.
+
+### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain
+
+The key file specified as `ssh_key_path` is not correct for accessing the node.
Double-check that you specified the correct `ssh_key_path` for the node and that you specified the correct user to connect with.
+
+### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys
+
+If you want to use encrypted private keys, you should use `ssh-agent` to load your keys with your passphrase. If the `SSH_AUTH_SOCK` environment variable is found in the environment where the `rke` command is run, it will be used automatically to connect to the node.
+
+### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
+
+The node is not reachable on the configured `address` and `port`.
\ No newline at end of file
diff --git a/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md
new file mode 100644
index 00000000000..d2ec4366763
--- /dev/null
+++ b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/_index.md
@@ -0,0 +1,237 @@
+---
+title: Upgrading Cert-Manager
+weight: 2040
+---
+
+Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher:
+
+1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753)
+1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline.
+1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/)
+
+To address these changes, this guide will do two things:
+
+1. Document the procedure for upgrading cert-manager
+1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data
+
+> **Important:**
+> If you are currently running a cert-manager version older than v0.11 and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them:
+
+> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server
+> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager
+> 3. Install the newer version of Rancher and cert-manager
+
+> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager API version, so the upgrade will be rejected.
+
+> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.x/en/upgrades/upgrades/ha/#c-upgrade-rancher) under the upgrade Rancher section.
+
+## Upgrade Cert-Manager Only
+
+> **Note:**
+> These instructions apply if you do not plan to upgrade Rancher.
+
+The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in `kube-system`, use that namespace in the instructions below.
You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. + +> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions) + +In order to upgrade cert-manager, follow these instructions: + +{{% accordion id="normal" label="Upgrading cert-manager with Internet access" %}} +1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) + + ```plain + helm delete --purge cert-manager + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y you installed + + ```plain + kubectl delete -f https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager if needed + + ```plain + kubectl create namespace cert-manager + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v0.12.0 + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +{{% /accordion %}} + +{{% accordion id="airgap" label="Upgrading cert-manager in an airgapped environment" %}} +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. + +1. 
From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + The Helm 3 command is as follows: + + ```plain + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + + The Helm 2 command is as follows: + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager (old and new) + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n cert-manager \ + delete deployment,sa,clusterrole,clusterrolebinding \ + -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y you installed + + ```plain + kubectl delete -f cert-manager/cert-manager-crd-old.yaml + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager + + ```plain + kubectl create namespace cert-manager + ``` + +1. 
Install cert-manager
+
+    ```plain
+    kubectl -n cert-manager apply -R -f ./cert-manager
+    ```
+
+1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources)
+
+    ```plain
+    kubectl apply -f cert-manager-backup.yaml
+    ```
+
+{{% /accordion %}}
+
+
+Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods:
+
+```
+kubectl get pods --namespace cert-manager
+
+NAME                                       READY   STATUS    RESTARTS   AGE
+cert-manager-5c6866597-zw7kh               1/1     Running   0          2m
+cert-manager-cainjector-577f6d9fd7-tr77l   1/1     Running   0          2m
+cert-manager-webhook-787858fcdb-nlzsq      1/1     Running   0          2m
+```
+
+## Cert-Manager API change and data migration
+
+Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release.
+
+Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and the new one are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format.
+
+Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/).
+
+The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as the API group changing to be `cert-manager.io` instead of `certmanager.k8s.io`.
+
+Cert-manager has also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/).
+
+Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/).
+
+For more information about upgrading cert-manager, refer to the [official cert-manager upgrade documentation](https://cert-manager.io/docs/installation/upgrading/).
+
diff --git a/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md
new file mode 100644
index 00000000000..3ea49690f27
--- /dev/null
+++ b/content/rancher/v2.x/en/installation/options/upgrading-cert-manager/helm-2-instructions/_index.md
@@ -0,0 +1,183 @@
+---
+title: Upgrading Cert-Manager with Helm 2
+weight: 2040
+---
+
+Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher:
+
+1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753)
+1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline.
+1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/)
+
+To address these changes, this guide will do two things:
+
+1. Document the procedure for upgrading cert-manager
+1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data
+
+> **Important:**
+> If you are currently running a cert-manager version older than v0.11 and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them:
+
+> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server
+> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager
+> 3. Install the newer version of Rancher and cert-manager
+
+> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager API version, so the upgrade will be rejected.
+
+> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.x/en/upgrades/upgrades/ha/#c-upgrade-rancher) under the upgrade Rancher section.
+
+## Upgrade Cert-Manager Only
+
+> **Note:**
+> These instructions apply if you do not plan to upgrade Rancher.
+
+The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in `kube-system`, use that namespace in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in, or this can cause issues.
+
+In order to upgrade cert-manager, follow these instructions:
+
+{{% accordion id="normal" label="Upgrading cert-manager with Internet access" %}}
+1. Back up existing resources as a precaution
+
+    ```plain
+    kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml
+    ```
+
+1. Delete the existing deployment
+
+    ```plain
+    helm delete --purge cert-manager
+    ```
+
+1. Install the CustomResourceDefinition resources separately
+
+    ```plain
+    kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml
+    ```
+
+1. Label the kube-system namespace to disable resource validation
+
+    ```plain
+    kubectl label namespace kube-system certmanager.k8s.io/disable-validation=true
+    ```
+
+1. Add the Jetstack Helm repository
+
+    ```plain
+    helm repo add jetstack https://charts.jetstack.io
+    ```
+
+1. Update your local Helm chart repository cache
+
+    ```plain
+    helm repo update
+    ```
+
+1. Install the new version of cert-manager
+
+    ```plain
+    helm install --version 0.9.1 --name cert-manager --namespace kube-system jetstack/cert-manager
+    ```
+{{% /accordion %}}
+
+{{% accordion id="airgap" label="Upgrading cert-manager in an airgapped environment" %}}
+### Prerequisites
+
+Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files.
+
+1.
Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.9.1 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + ```plain + helm template ./cert-manager-v0.9.1.tgz --output-dir . \ + --name cert-manager --namespace kube-system \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml + ``` + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n kube-system delete deployment,sa,clusterrole,clusterrolebinding -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + +1. Label the kube-system namespace to disable resource validation + + ```plain + kubectl label namespace kube-system certmanager.k8s.io/disable-validation=true + ``` + +1. Install cert-manager + + ```plain + kubectl -n kube-system apply -R -f ./cert-manager + ``` +{{% /accordion %}} + + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: + +``` +kubectl get pods --namespace kube-system + +NAME READY STATUS RESTARTS AGE +cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m +cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m +cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m +``` + +If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check cert-manager's [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. + +> **Note:** The above instructions ask you to add the disable-validation label to the kube-system namespace. 
Here are additional resources that explain why this is necessary: +> +> - [Information on the disable-validation label](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.4-0.5.html?highlight=certmanager.k8s.io%2Fdisable-validation#disabling-resource-validation-on-the-cert-manager-namespace) +> - [Information on webhook validation for certificates](https://docs.cert-manager.io/en/latest/getting-started/webhook.html) + +## Cert-Manager API change and data migration + +Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. + +Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. + +Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be `cert-manager.io` instead of `certmanager.k8s.io.` + +We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). + +For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/_index.md new file mode 100644 index 00000000000..e417c9260a5 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/_index.md @@ -0,0 +1,16 @@ +--- +title: Other Installation Methods +weight: 4 +--- + +### Docker Installations + +The [single-node Docker installation]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +### Air Gapped Installations + +Follow [these steps]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. 
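+
+For comparison with the Kubernetes-based installs, the single-node Docker installation described above boils down to a single command. A minimal sketch (assuming the `stable` image tag and the default Rancher-generated self-signed certificate) looks like this:
+
+```plain
+# Run the Rancher server in a single Docker container, exposing ports 80 and 443
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  rancher/rancher:stable
+```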
\ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/_index.md new file mode 100644 index 00000000000..0ebdced73b0 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/_index.md @@ -0,0 +1,37 @@ +--- +title: Installing Rancher in an Air Gapped Environment +weight: 3 +aliases: + - /rancher/v2.x/en/installation/air-gap-installation/ + - /rancher/v2.x/en/installation/air-gap-high-availability/ + - /rancher/v2.x/en/installation/air-gap-single-node/ +--- + +This section is about installations of Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +Throughout the installations instructions, there will be _tabs_ for either a high availability Kubernetes installation or a single-node Docker installation. + +### Air Gapped Kubernetes Installations + +This section covers how to install Rancher on a Kubernetes cluster in an air gapped environment. + +A Kubernetes install is composed of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +### Air Gapped Docker Installations + +These instructions also cover how to install Rancher on a single node in an air gapped environment. + +The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. + +Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +# Installation Outline + +- [1. Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) +- [2. Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) +- [3. Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) +- [4. Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) + +### [Next: Prepare your Node(s)]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md new file mode 100644 index 00000000000..c12809c0695 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/_index.md @@ -0,0 +1,320 @@ +--- +title: 4. 
Install Rancher +weight: 400 +aliases: + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/ + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/ + - /rancher/v2.x/en/installation/air-gap-single-node/install-rancher + - /rancher/v2.x/en/installation/air-gap/install-rancher +--- + +This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + +> **Note:** These installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/installation/options/air-gap-helm2) provides a copy of the older air gap installation instructions for Rancher installed on Kubernetes with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +{{% tabs %}} +{{% tab "Kubernetes Install (Recommended)" %}} + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher in five parts: + +- [A. Add the Helm Chart Repository](#a-add-the-helm-chart-repository) +- [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration) +- [C. Render the Rancher Helm Template](#c-render-the-rancher-helm-template) +- [D. Install Rancher](#d-install-rancher) +- [E. For Rancher versions prior to v2.3.0, Configure System Charts](#e-for-rancher-versions-prior-to-v2-3-0-configure-system-charts) + +### A. Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. + ```plain + helm init -c + ``` + +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. +```plain +helm fetch rancher-/rancher +``` + +> Want additional options? Need help troubleshooting? See [Kubernetes Install: Advanced Options]({{}}/rancher/v2.x/en/installation/k8s-install/helm-rancher/#advanced-configurations). + +### B. Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. 
+
+When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate.
+
+> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.x/en/installation/options/chart-options/#external-tls-termination).
+
+| Configuration | Chart option | Description | Requires cert-manager |
+| ------------------------------------------ | ---------------------------- | ----------- | --------------------- |
+| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no |
+
+### C. Render the Rancher Helm Template
+
+When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations.
+
+| Chart Option | Chart Value | Description |
+| ----------------------- | -------------------------------- | ---- |
+| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending on the running cert-manager version. |
+| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. |
+| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ |
+
+Based on the choice you made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below.
+
+{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}}
+
+By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface.
+
+> **Note:**
+> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/).
+
+1. From a system connected to the internet, add the cert-manager repo to Helm.
+    ```plain
+    helm repo add jetstack https://charts.jetstack.io
+    helm repo update
+    ```
+
+1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager).
+
+    ```plain
+    helm fetch jetstack/cert-manager --version v0.12.0
+    ```
+
+1. Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files.
+    ```plain
+    helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \
+    --namespace cert-manager \
+    --set image.repository=/quay.io/jetstack/cert-manager-controller \
+    --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \
+    --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector
+    ```
+
+1. Download the required CRD file for cert-manager
+    ```plain
+    curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml
+    ```
+1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools.
+
+
+    Placeholder | Description
+    ------------|-------------
+    `` | The version number of the output tarball.
+    `` | The DNS name you pointed at your load balancer.
+    `` | The DNS name for your private registry.
+    `` | Cert-manager version running on k8s cluster.
+ + ```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +{{% /accordion %}} + +{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +Then refer to [Adding TLS Secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. + +{{% /accordion %}} + +### D. Install Rancher + +Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. + +Use `kubectl` to create namespaces and apply the rendered manifests. + +If you chose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. + +{{% accordion id="install-cert-manager" label="Self-Signed Certificate Installs - Install Cert-manager" %}} + +If you are using self-signed certificates, install cert-manager: + +1. Create the namespace for cert-manager. +```plain +kubectl create namespace cert-manager +``` + +1. Create the cert-manager CustomResourceDefinitions (CRDs). +```plain +kubectl apply -f cert-manager/cert-manager-crd.yaml +``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Launch cert-manager. +```plain +kubectl apply -R -f ./cert-manager +``` + +{{% /accordion %}} + +Install Rancher: + +```plain +kubectl create namespace cattle-system +kubectl -n cattle-system apply -R -f ./rancher +``` + +**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. + +### E. 
For Rancher versions prior to v2.3.0, Configure System Charts + +If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). + +### Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/options/chart-options/) +- [Adding TLS secrets]({{}}/rancher/v2.x/en/installation/options/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) + +{{% /tab %}} +{{% tab "Docker Install" %}} + +The Docker installation is for Rancher users that are wanting to **test** out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. **Important: If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | + +> **Do you want to...** +> +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/options/custom-ca-root-certificate/). +> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#api-audit-log). + +- For Rancher prior to v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. 
For details, refer to the documentation on [setting up the system charts for Rancher prior to v2.3.0.]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0) + +Choose from the following options: + +{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +{{% /accordion %}} +{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#cert-order). + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. 
|
+
+```
+docker run -d --restart=unless-stopped \
+    -p 80:80 -p 443:443 \
+    -v //:/etc/rancher/ssl/cert.pem \
+    -v //:/etc/rancher/ssl/key.pem \
+    -v //:/etc/rancher/ssl/cacerts.pem \
+    -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher
+    -e CATTLE_SYSTEM_CATALOG=bundled \ # Available as of v2.3.0, use the packaged Rancher system charts
+    /rancher/rancher:
+```
+
+{{% /accordion %}}
+{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}}
+
+In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings.
+
+> **Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem).
+
+After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary.
+
+| Placeholder | Description |
+| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `` | The path to the directory containing your certificate files. |
+| `` | The path to your full certificate chain. |
+| `` | The path to the private key for your certificate. |
+| `` | Your private registry URL and port. |
+| `` | The release tag of the [Rancher version]({{}}/rancher/v2.x/en/installation/options/server-tags/) that you want to install. |
+
+> **Note:** Pass `--no-cacerts` as an argument to the container (after the image name) to disable the default CA certificate generated by Rancher.
+
+```
+docker run -d --restart=unless-stopped \
+    -p 80:80 -p 443:443 \
+    -v //:/etc/rancher/ssl/cert.pem \
+    -v //:/etc/rancher/ssl/key.pem \
+    -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher
+    -e CATTLE_SYSTEM_CATALOG=bundled \ # Available as of v2.3.0, use the packaged Rancher system charts
+    /rancher/rancher: --no-cacerts
+```
+
+{{% /accordion %}}
+
+If you are installing Rancher v2.3.0+, the installation is complete.
+
+If you are installing Rancher versions prior to v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted on GitHub, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0).
+
+{{% /tab %}}
+{{% /tabs %}}
diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md
new file mode 100644
index 00000000000..36f56180c1a
--- /dev/null
+++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md
@@ -0,0 +1,84 @@
+---
+title: '3. Install Kubernetes with RKE (Kubernetes Installs Only)'
+weight: 300
+aliases:
+  - /rancher/v2.x/en/installation/air-gap-high-availability/install-kube
+---
+
+This section describes how to prepare and launch the Kubernetes cluster that is used to deploy Rancher server in your air gapped environment.
+ +Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE). Before you can start your Kubernetes cluster, you'll need to [install RKE]({{}}/rke/latest/en/installation/) and create an RKE config file. + +- [A. Create an RKE Config File](#a-create-an-rke-config-file) +- [B. Run RKE](#b-run-rke) +- [C. Save Your Files](#c-save-your-files) + +### A. Create an RKE Config File + +From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which defines the cluster that you're deploying Rancher to. + +Replace the values in the code sample below with the help of the _RKE Options_ table. Use the IP addresses or DNS names of the [3 nodes]({{}}/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts) you created. + +> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). + +
RKE Options
+ +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gap network. | +| `user` | ✓ | A user that can run docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### B. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### C. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +### Issues or errors? + +See the [Troubleshooting]({{}}/rancher/v2.x/en/installation/options/troubleshooting/) page. + +### [Next: Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md new file mode 100644 index 00000000000..cc490aa4ca6 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md @@ -0,0 +1,279 @@ +--- +title: '2. Collect and Publish Images to your Private Registry' +weight: 200 +aliases: + - /rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/ + - /rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/ + - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/ + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ +--- + +> **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. +> +> **Note:** Populating the private registry with images is the same process for HA and Docker installations; the only difference in this section is whether you plan to provision a Windows cluster. + +By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.x/en/tools/) in Rancher, e.g. monitoring, pipelines, and alerts, are pulled from Docker Hub. In an air gap installation of Rancher, you will need a private registry that is accessible from your Rancher server. Then, you will load the registry with all the images. + +This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. + +The steps below assume you are provisioning Linux-only clusters. If you plan on provisioning any [Windows clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed for a Windows cluster. + +{{% tabs %}} +{{% tab "Linux Only Clusters" %}} + +For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. + +A. Find the required assets for your Rancher version
+B. Collect all the required images
+C. Save the images to your workstation
+D. Populate the private registry + +### Prerequisites + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section (pictured above), download the following files, which are required to install Rancher in an air gap environment: + +| Release File | Description | +| ---------------- | -------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### B. Collect all the required images (For Kubernetes Installs using Rancher Generated Self-Signed Certificate) + +In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.9.1, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.9.1 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### C. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### D. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. + +1. Log into your private registry if required: + ```plain + docker login + ``` +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. 
Use `rancher-load-images.sh` to extract, tag, and push the images listed in `rancher-images.txt` from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt --registry + ``` +{{% /tab %}} +{{% tab "Linux and Windows Clusters" %}} + +_Available as of v2.3.0_ + +For Rancher servers that will provision Linux and Windows clusters, there are separate steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. + +### Windows Steps + +The Windows images need to be collected and pushed from a Windows Server workstation. + +A. Find the required assets for your Rancher version
+B. Save the images to your Windows Server workstation
+C. Prepare the Docker daemon
+D. Populate the private registry + +{{% accordion label="Collecting and Populating Windows Images into the Private Registry"%}} + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + + | Release File | Description | + | ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | + | `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | + | `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | + | `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + +### B. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. Run `rancher-save-images.ps1` to create a tarball of all the required images: + + ```plain + ./rancher-save-images.ps1 + ``` + + **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + +### C. Prepare the Docker daemon + +1. Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ```json + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... + } + ``` + +### D. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. + +1. Using `powershell`, log into your private registry if required: + + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + + ```plain + ./rancher-load-images.ps1 --registry + ``` + +{{% /accordion %}} + +### Linux Steps + +The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. 
These steps are different from the Linux-only steps because the Linux images that are pushed will actually be manifests that support both Windows and Linux images. + +A. Find the required assets for your Rancher version
+B. Collect all the required images
+C. Save the images to your Linux workstation
+D. Populate the private registry + +{{% accordion label="Collecting and Populating Linux Images into the Private Registry" %}} + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section (pictured above), download the following files, which are required to install Rancher in an air gap environment: + + | Release File | Description | + | ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | + | `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | + | `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | + | `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | + | `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### B. Collect all the required images + +1. **For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + + 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.9.1, please see our [upgrade documentation]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager/). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.9.1 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + + 2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### C. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + + **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. 
This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### D. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. The `rancher-images.txt` / `rancher-windows-images.txt` image list is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. + +1. Log into your private registry if required: + + ```plain + docker login + ``` + +1. Make `rancher-load-images.sh` an executable: + + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry + ``` + +{{% /accordion %}} + +{{% /tab %}} +{{% /tabs %}} + +### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/) + +### [Next: Docker Installs - Install Rancher]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md new file mode 100644 index 00000000000..791d25bfa4b --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md @@ -0,0 +1,104 @@ +--- +title: '1. Prepare your Node(s)' +weight: 100 +aliases: + - /rancher/v2.x/en/installation/air-gap-single-node/provision-host +--- + +This section is about how to prepare your node(s) to install Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + +# Prerequisites + +{{% tabs %}} +{{% tab "Kubernetes Install (Recommended)" %}} + +### OS, Docker, Hardware, and Networking + +Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +### Private Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). + +### CLI Tools + +The following CLI tools are required for the Kubernetes Install. Make sure these tools are installed on your workstation and available in your `$PATH`. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/options/helm-version) to choose a version of Helm to install Rancher. 
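+
+As a quick check (a minimal sketch; flags and output vary by tool version, and Helm 2 may also attempt to contact Tiller), you can confirm that each binary is installed and on your `$PATH`:
+
+```plain
+kubectl version --client   # print the kubectl client version
+rke --version              # print the RKE version
+helm version               # print the Helm version
+```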
+ +{{% /tab %}} +{{% tab "Docker Install" %}} + +### OS, Docker, Hardware, and Networking + +Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +### Private Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). +{{% /tab %}} +{{% /tabs %}} + +# Set up Infrastructure + +{{% tabs %}} +{{% tab "Kubernetes Install (Recommended)" %}} + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
+ +![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) + +### A. Provision three air gapped Linux hosts according to our requirements + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.x/en/installation/requirements). + +### B. Set up your Load Balancer + +When setting up the Kubernetes cluster that will run the Rancher server components, an Ingress controller pod will be deployed on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. + +You will need to configure a load balancer as a basic Layer 4 TCP forwarder to direct traffic to these ingress controller pods. The exact configuration will vary depending on your environment. + +> **Important:** +> Only use this load balancer (i.e, the `local` cluster Ingress) to load balance the Rancher server. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. + +**Load Balancer Configuration Samples:** + +- For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx) +- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nlb) + +{{% /tab %}} +{{% tab "Docker Install" %}} + +The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation. + +Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +### A. Provision a single, air gapped Linux host according to our Requirements + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.x/en/installation/requirements). + +{{% /tab %}} +{{% /tabs %}} + +### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/_index.md new file mode 100644 index 00000000000..0f6386a6f62 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/_index.md @@ -0,0 +1,152 @@ +--- +title: Installing Rancher on a Single Node Using Docker +description: For development and testing environments only, use a Docker install. 
Install Docker on a single Linux host, and deploy Rancher with a single Docker container. +weight: 1 +aliases: + - /rancher/v2.x/en/installation/single-node-install/ + - /rancher/v2.x/en/installation/single-node + - /rancher/v2.x/en/installation/other-installation-methods/single-node +--- + +For development and testing environments only, Rancher can be installed by running a single Docker container. + +In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. + +> **Want to use an external load balancer?** +> See [Docker Install with an External Load Balancer]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb) instead. + +# Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + +# 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements) to launch your Rancher server. + +# 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> **Do you want to...** +> +> - Use a proxy? See [HTTP Proxy Configuration]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/) +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) +> - Complete an Air Gap Installation? See [Air Gap: Docker Install]({{}}/rancher/v2.x/en/installation/air-gap-single-node/) +> - Record all transactions with the Rancher API? See [API Auditing](#api-audit-log) + +Choose from the following options: + +### Option A: Default Rancher-generated Self-signed Certificate + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the minimum installation command below. + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest +``` + +### Option B: Bring Your Own Certificate, Self-signed +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in [PEM format](#pem). +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. 
+ +| Placeholder | Description | +| ------------------- | --------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest +``` + +### Option C: Bring Your Own Certificate, Signed by a Recognized CA + +In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisites:** +> +> - The certificate files must be in [PEM format](#pem). +> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +After obtaining your certificate, run the Docker command below. + +- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. +- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +| Placeholder | Description | +| ------------------- | ----------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + rancher/rancher:latest \ + --no-cacerts +``` + +### Option D: Let's Encrypt Certificate + +> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. + +> **Prerequisites:** +> +> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. +> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. 
+ +| Placeholder | Description | +| ----------------- | ------------------- | +| `` | Your domain address | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest \ + --acme-domain +``` + +## Advanced Options + +When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: + +- Custom CA Certificate +- API Audit Log +- TLS Settings +- Air Gap +- Persistent Data +- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +Refer to [this page](./advanced) for details. + +## Troubleshooting + +Refer to [this page](./troubleshooting) for frequently asked questions and troubleshooting tips. + +## What's Next? + +- **Recommended:** Review [Single Node Backup and Restoration]({{}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/). diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/_index.md new file mode 100644 index 00000000000..3aa2362b502 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/_index.md @@ -0,0 +1,93 @@ +--- +title: Advanced Options for Docker Installs +weight: 5 +--- + +When installing Rancher, there are several [advanced options]({{}}/rancher/v2.x/en/installation/options/) that can be enabled: + +- [Custom CA Certificate](#custom-ca-certificate) +- [API Audit Log](#api-audit-log) +- [TLS Settings](#tls-settings) +- [Air Gap](#air-gap) +- [Persistent Data](#persistent-data) +- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) + +### Custom CA Certificate + +If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. + +Use the command example to start a Rancher container with your private CA certificates mounted. + +- The volume flag (`-v`) should specify the host directory containing the CA root certificates. +- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. +- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. +- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. + +The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /host/certs:/container/certs \ + -e SSL_CERT_DIR="/container/certs" \ + rancher/rancher:latest +``` + +### API Audit Log + +The API Audit Log records all the user and system transactions made through Rancher server. 
+ +The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. + +See [API Audit Log]({{}}/rancher/v2.x/en/installation/api-auditing) for more information and options. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /var/log/rancher/auditlog:/var/log/auditlog \ + -e AUDIT_LEVEL=1 \ + rancher/rancher:latest +``` + +### TLS settings + +_Available as of v2.1.7_ + +To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_TLS_MIN_VERSION="1.0" \ + rancher/rancher:latest +``` + +See [TLS settings]({{}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. + +### Air Gap + +If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +{{< persistentdata >}} + +### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. + +If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. + +Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. + +To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: + +``` +docker run -d --restart=unless-stopped \ + -p 8080:80 -p 8443:443 \ + rancher/rancher:latest +``` diff --git a/content/rancher/v2.x/en/installation/single-node/proxy/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/_index.md similarity index 53% rename from content/rancher/v2.x/en/installation/single-node/proxy/_index.md rename to content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/_index.md index 9a82c8bbdc2..31fa3f9dedb 100644 --- a/content/rancher/v2.x/en/installation/single-node/proxy/_index.md +++ b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/_index.md @@ -2,27 +2,29 @@ title: HTTP Proxy Configuration weight: 251 aliases: -- /rancher/v2.x/en/installation/proxy-configuration/ + - /rancher/v2.x/en/installation/proxy-configuration/ + - /rancher/v2.x/en/installation/single-node/proxy --- + If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. 
-Environment variable | Purpose ---------------------------|--------- -HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) -HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) -NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) +| Environment variable | Purpose | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | +| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | +| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | > **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. -## Single Node Installation +## Docker Installation -Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Single Node Installation]({{< baseurl >}}/rancher/v2.x/en/installation/single-node-install/) are: +Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation]({{}}/rancher/v2.x/en/installation/single-node-install/) are: -* `localhost` -* `127.0.0.1` -* `0.0.0.0` +- `localhost` +- `127.0.0.1` +- `0.0.0.0` The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage the proxy when accessing network range `192.168.10.0/24` and every hostname under the domain `example.com`. diff --git a/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb/_index.md similarity index 62% rename from content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md rename to content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb/_index.md index a6d832a5a2c..cbc9d67ab9f 100644 --- a/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md +++ b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb/_index.md @@ -1,15 +1,21 @@ --- -title: Single Node Install with External Load Balancer +title: Docker Install with External Load Balancer weight: 252 aliases: -- /rancher/v2.x/en/installation/single-node-install-external-lb/ + - /rancher/v2.x/en/installation/single-node/single-node-install-external-lb/ --- + For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer 7 Nginx load balancer. ->**Want to skip the external load balancer?** -> See [Single Node Installation]({{< baseurl >}}/rancher/v2.x/en/installation/single-node) instead. +> **Want to skip the external load balancer?** +> See [Docker Installation]({{}}/rancher/v2.x/en/installation/single-node) instead. 
+ +## Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) ## Installation Outline + - [1. Provision Linux Host](#1-provision-linux-host) @@ -20,71 +26,71 @@ For development and testing environments that have a special requirement to term ## 1. Provision Linux Host -Provision a single Linux host according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements) to launch your {{< product >}} Server. +Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.x/en/installation/requirements) to launch your {{< product >}} Server. ## 2. Choose an SSL Option and Install Rancher For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. ->**Do you want to...** +> **Do you want to...** > ->- Complete an Air Gap Installation? ->- Record all transactions with the Rancher API? +> - Complete an Air Gap Installation? +> - Record all transactions with the Rancher API? > ->See [Advanced Options](#advanced-options) below before continuing. +> See [Advanced Options](#advanced-options) below before continuing. Choose from the following options: {{% accordion id="option-a" label="Option A-Bring Your Own Certificate: Self-Signed" %}} If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. ->**Prerequisites:** ->Create a self-signed certificate. +> **Prerequisites:** +> Create a self-signed certificate. > ->- The certificate files must be in [PEM format](#pem). +> - The certificate files must be in [PEM format](#pem). **To Install Rancher Using a Self-Signed Cert:** 1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest - ``` + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest + ``` {{% /accordion %}} {{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Signed by Recognized CA" %}} If your cluster is public facing, it's best to use a certificate signed by a recognized CA. ->**Prerequisites:** +> **Prerequisites:** > ->- The certificate files must be in [PEM format](#pem). +> - The certificate files must be in [PEM format](#pem). **To Install Rancher Using a Cert Signed by a Recognized CA:** If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. -1. Enter the following command. +1. Enter the following command. - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest --no-cacerts - ``` -{{% /accordion %}} + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest --no-cacerts + ``` + + {{% /accordion %}} ## 3. 
Configure Load Balancer -When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: - https` header, this redirect is disabled. +When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. The load balancer or proxy has to be configured to support the following: -* **WebSocket** connections -* **SPDY** / **HTTP/2** protocols -* Passing / setting the following headers: +- **WebSocket** connections +- **SPDY** / **HTTP/2** protocols +- Passing / setting the following headers: | Header | Value | Description | |--------|-------|-------------| @@ -92,17 +98,15 @@ The load balancer or proxy has to be configured to support the following: | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. - - ### Example Nginx configuration This NGINX configuration is tested on NGINX 1.14. - >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). +> **Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). -* Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. -* Replace both occurences of `FQDN` to the DNS name for Rancher. -* Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. +- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. ``` worker_processes 4; @@ -155,8 +159,8 @@ http { ## What's Next? -- **Recommended:** Review [Single Node Backup and Restoration]({{< baseurl >}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/). +- **Recommended:** Review [Single Node Backup and Restoration]({{}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.x/en/cluster-provisioning/).
@@ -168,21 +172,21 @@ http { ### API Auditing -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. +If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ + -e AUDIT_LEVEL=1 \ + -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ + -e AUDIT_LOG_MAXAGE=20 \ + -e AUDIT_LOG_MAXBACKUP=20 \ + -e AUDIT_LOG_MAXSIZE=100 \ ### Air Gap -If you are visiting this page to complete an [Air Gap Installation]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. +If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.x/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. **Example:** - /rancher/rancher:latest + /rancher/rancher:latest ### Persistent Data @@ -190,7 +194,7 @@ If you are visiting this page to complete an [Air Gap Installation]({{< baseurl This layer 7 Nginx configuration is tested on Nginx version 1.13 (mainline) and 1.14 (stable). - >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). +> **Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). ``` upstream rancher { @@ -232,37 +236,3 @@ server {
-## What's Next? - -- **Recommended:** Review [Single Node Backup and Restoration]({{< baseurl >}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_single >}} - -## Advanced Options - -### API Auditing - -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. - - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - -### Air Gap - -If you are visiting this page to complete an [Air Gap Installation]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -{{< persistentdata >}} diff --git a/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md new file mode 100644 index 00000000000..3b442e08f57 --- /dev/null +++ b/content/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md @@ -0,0 +1,6 @@ +--- +title: FAQ and Troubleshooting +weight: 4 +--- + +{{< ssl_faq_single >}} diff --git a/content/rancher/v2.x/en/installation/requirements/_index.md b/content/rancher/v2.x/en/installation/requirements/_index.md index ee88adcff1f..65c9a808a6a 100644 --- a/content/rancher/v2.x/en/installation/requirements/_index.md +++ b/content/rancher/v2.x/en/installation/requirements/_index.md @@ -1,94 +1,212 @@ --- -title: Node Requirements +title: Installation Requirements +description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup weight: 1 -aliases: --- -Whether you're configuring Rancher to run in a single-node or high-availability setup, each node running Rancher Server must meet the following requirements. +This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. + +> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) which will run your apps and services. + +Make sure the node(s) for the Rancher server fulfill the following requirements: + +- [Operating Systems and Docker Requirements](#operating-systems-and-docker-requirements) +- [Hardware Requirements](#hardware-requirements) + - [CPU and Memory](#cpu-and-memory) + - [Disks](#disks) +- [Networking Requirements](#networking-requirements) + - [Node IP Addresses](#node-ip-addresses) + - [Port Requirements](#port-requirements) + +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices/deployment-types/) + +The Rancher UI works best in Firefox or Chrome. + +# Operating Systems and Docker Requirements + +Rancher should work with any modern Linux distribution and any modern Docker version. 
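+
+As a quick sanity check (a minimal sketch; the exact output varies by distribution, and Docker must already be installed for the second command), you can confirm what a candidate node is running:
+
+```plain
+cat /etc/os-release   # show the Linux distribution and version
+docker version        # show the installed Docker client and server versions
+```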
+ +Rancher has been tested and is supported with Ubuntu, CentOS, Oracle Linux, RancherOS, and RedHat Enterprise Linux. + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. + +Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. This [how-to guide]({{}}/rancher/v2.x/en/installation/options/firewall) shows how to check the default firewall rules and how to open the ports with `firewalld` if necessary. + +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.x/en/installation/options/arm64-platform/) + +### Installing Docker + +Docker can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.x/en/installation/requirements/installing-docker) to install Docker with one command. + +# Hardware Requirements + +This section describes the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. + +### CPU and Memory + +Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher with Docker or on a Kubernetes cluster. {{% tabs %}} -{{% tab "Operating Systems and Docker" %}} -
-Rancher is tested on the following operating systems and their subsequent non-major releases with a supported version of [Docker](https://www.docker.com/). +{{% tab "Nodes in Kubernetes Install" %}} -* Ubuntu 16.04 (64-bit x86) - * Docker 17.03.x, 18.06.x, 18.09.x -* Ubuntu 18.04 (64-bit x86) - * Docker 18.06.x, 18.09.x -* Red Hat Enterprise Linux (RHEL)/CentOS 7.6 (64-bit x86) - * RHEL Docker 1.13 - * Docker 17.03.x, 18.06.x, 18.09.x -* RancherOS 1.5.1 (64-bit x86) - * Docker 17.03.x, 18.06.x, 18.09.x -* Windows Server 2019 (64-bit x86) - * Docker 18.09 - * _Experimental, see [Configuring Custom Clusters for Windows]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/)_ +These requirements apply to [installing Rancher on a Kubernetes cluster.]({{}}/rancher/v2.x/en/installation/k8s-install/) -If you are using RancherOS, make sure you switch the Docker engine to a supported version using:
-``` -# Look up available versions -sudo ros engine list - -# Switch to a supported version -sudo ros engine switch docker-18.09.2 -``` -See [Running on ARM64 (Experimental)]({{< baseurl >}}/rancher/v2.x/en/installation/arm64-platform/) if you plan to run Rancher on ARM64. -
-
-[Docker Documentation: Installation Instructions](https://docs.docker.com/) -
-
-{{% /tab %}} -{{% tab "Hardware" %}} -
-Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. - - -**[HA Node]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/) Requirements** - -Deployment Size | Clusters | Nodes | vCPUs | RAM | ---- | --- | --- | --- | --- | -Small | Up to 5 | Up to 50 | 2 | 8 GB | -Medium | Up to 15 | Up to 200 | 4 | 16 GB | -Large | Up to 50 | Up to 500 | 8 | 32 GB | -X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | -XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | - -
- -**[Single Node]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/) Requirements** - -Deployment Size | Clusters | Nodes | vCPUs | RAM | ---- | --- | --- | --- | --- | -Small | Up to 5 | Up to 50 | 1 | 4 GB | -Medium | Up to 15 | Up to 200 | 2 | 8 GB | - -
- -**Disks** - -Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPs. In larger clusters consider using dedicated storage devices for etcd data and wal directories. - -
+| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | --------- | ---------- | ----------------------------------------------- | ----------------------------------------------- | +| Small | Up to 5 | Up to 50 | 2 | 8 GB | +| Medium | Up to 15 | Up to 200 | 4 | 16 GB | +| Large | Up to 50 | Up to 500 | 8 | 32 GB | +| X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | +| XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | {{% /tab %}} -{{% tab "Networking" %}} -
+{{% tab "Node in Docker Install" %}} -### Node IP Address +These requirements apply to [single node]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker) installations of Rancher. -Each node used (either for the Single Node Install, High Availability (HA) Install or nodes that are used in clusters) should have a static IP configured. In case of DHCP, the nodes should have a DHCP reservation to make sure the node gets the same IP allocated. +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 1 | 4 GB | +| Medium | Up to 15 | Up to 200 | 2 | 8 GB | + +{{% /tab %}} +{{% /tabs %}} + +### Disks + +Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. + +# Networking Requirements + +This section describes the networking requirements for the node(s) where the Rancher server is installed. + +### Node IP Addresses + +Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. ### Port Requirements -When deploying Rancher in an HA cluster, certain ports on your nodes must be open to allow communication with Rancher. The ports that must be open change according to the type of machines hosting your cluster nodes. For example, if your are deploying Rancher on nodes hosted by an infrastructure, port `22` must be open for SSH. The following diagram depicts the ports that are opened for each [cluster type]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning). +This section describes the port requirements for nodes running the `rancher/rancher` container. -
Cluster Type Port Requirements
+The port requirements are different depending on whether you are installing Rancher on a single node or on a high-availability Kubernetes cluster. -![Basic Port Requirements]({{< baseurl >}}/img/rancher/port-communications.svg) +- **For a Docker installation,** you only need to open the ports required to enable Rancher to communicate with downstream user clusters. +- **For a high-availability installation,** the same ports need to be opened, as well as additional ports required to set up the Kubernetes cluster that Rancher is installed on. -{{< requirements_ports_rancher >}} -{{< requirements_ports_rke >}} -{{< ports_aws_securitygroup_nodedriver >}} +{{% tabs %}} +{{% tab "Kubernetes Install Port Requirements" %}} +### Ports for Communication with Downstream Clusters + +To communicate with downstream clusters, Rancher requires different ports to be open depending on the infrastructure you are using. + +For example, if you are deploying Rancher on nodes hosted by an infrastructure provider, port `22` must be open for SSH. + +The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning). + +
Port Requirements for the Rancher Management Plane
+ +![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) + +The following tables break down the port requirements for inbound and outbound traffic: + +
Inbound Rules for Rancher Nodes
+
+| Protocol | Port | Source | Description |
+| -------- | ---- | ------ | ----------- |
+| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used |
+| TCP | 443 | etcd nodes • controlplane nodes • worker nodes • hosted/imported Kubernetes • any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl |
+
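As an illustration only (the hostname `rancher.example.com` is a placeholder and these commands are not part of the requirements), reachability of the inbound ports can be spot-checked from a source that is supposed to have access once Rancher is running:

```
# TCP reachability of the HTTPS port (assumes a netcat build that supports -z)
nc -zv rancher.example.com 443

# Any HTTP status code back from the Rancher URL confirms the path through
# your load balancer or proxy is open (-k skips certificate verification)
curl -sk -o /dev/null -w '%{http_code}\n' https://rancher.example.com/
```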
Outbound Rules for Rancher Nodes
+ +| Protocol | Port | Destination | Description | +| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | `35.160.43.145/32`, `35.167.242.46/32`, `52.33.59.17/32` | git.rancher.io (catalogs) | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). + +### Additional Port Requirements for Nodes in an HA/Kubernetes Cluster + +You will need to open additional ports to launch the Kubernetes cluster that are required for a high-availability installation of Rancher. + +If you follow the Rancher installation documentation for setting up a Kubernetes cluster using RKE, you will set up a cluster in which all three nodes have all three roles: etcd, controlplane, and worker. In that case, you can refer to this list of requirements for each node with all three roles: + +
Inbound Rules for Nodes with All Three Roles: etcd, Controlplane, and Worker
+ +Protocol | Port | Source | Description +-----------|------|----------|-------------- +TCP | 22 | Linux worker nodes only, and any network that you want to be able to remotely access this node from. | Remote access over SSH +TCP | 80 | Any source that consumes Ingress services | Ingress controller (HTTP) +TCP | 443 | Any source that consumes Ingress services | Ingress controller (HTTPS) +TCP | 2376 | Rancher nodes | Docker daemon TLS port used by Docker Machine (only needed when using Node Driver/Templates) +TCP | 2379 | etcd nodes and controlplane nodes | etcd client requests +TCP | 2380 | etcd nodes and controlplane nodes | etcd peer communication +TCP | 3389 | Windows worker nodes only, and any network that you want to be able to remotely access this node from. | Remote access over RDP +TCP | 6443 | etcd nodes, controlplane nodes, and worker nodes | Kubernetes apiserver +UDP | 8472 | etcd nodes, controlplane nodes, and worker nodes | Canal/Flannel VXLAN overlay networking +TCP | 9099 | the node itself (local traffic, not across nodes) | Canal/Flannel livenessProbe/readinessProbe +TCP | 10250 | controlplane nodes | kubelet +TCP | 10254 | the node itself (local traffic, not across nodes) | Ingress controller livenessProbe/readinessProbe +TCP/UDP | 30000-32767 | Any source that consumes NodePort services | NodePort port range + +
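As a sketch only: on a node with all three roles where `firewalld` is in use (see the firewall how-to linked earlier), the Linux inbound rules above could be opened roughly as follows. This opens the ports to any source in the default zone, so tighten the sources to match the table and your own security policy, and skip port `2376` if you are not using Node Drivers:

```
sudo firewall-cmd --permanent --add-port=22/tcp            # SSH
sudo firewall-cmd --permanent --add-port=80/tcp            # Ingress controller (HTTP)
sudo firewall-cmd --permanent --add-port=443/tcp           # Ingress controller (HTTPS)
sudo firewall-cmd --permanent --add-port=2376/tcp          # Docker daemon TLS (Node Driver/Templates only)
sudo firewall-cmd --permanent --add-port=2379-2380/tcp     # etcd client requests and peer communication
sudo firewall-cmd --permanent --add-port=6443/tcp          # Kubernetes apiserver
sudo firewall-cmd --permanent --add-port=8472/udp          # Canal/Flannel VXLAN overlay networking
sudo firewall-cmd --permanent --add-port=9099/tcp          # Canal/Flannel livenessProbe/readinessProbe
sudo firewall-cmd --permanent --add-port=10250/tcp         # kubelet
sudo firewall-cmd --permanent --add-port=10254/tcp         # Ingress controller livenessProbe/readinessProbe
sudo firewall-cmd --permanent --add-port=30000-32767/tcp   # NodePort range
sudo firewall-cmd --permanent --add-port=30000-32767/udp   # NodePort range
sudo firewall-cmd --reload
```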
Outbound Rules for Nodes with All Three Roles: etcd, Controlplane, and Worker
+
+Protocol | Port | Source | Destination | Description
+-----------|------|----------|---------------|--------------
+TCP | 22 | RKE node | Any node configured in Cluster Configuration File | SSH provisioning of node by RKE
+TCP | 443 | | Rancher nodes | Rancher agent
+TCP | 2379 | | etcd nodes | etcd client requests
+TCP | 2380 | | etcd nodes | etcd peer communication
+TCP | 6443 | RKE node | controlplane nodes | Kubernetes API server
+TCP | 6443 | | controlplane nodes | Kubernetes API server
+UDP | 8472 | | etcd nodes, controlplane nodes, and worker nodes | Canal/Flannel VXLAN overlay networking
+TCP | 9099 | | the node itself (local traffic, not across nodes) | Canal/Flannel livenessProbe/readinessProbe
+TCP | 10250 | | etcd nodes, controlplane nodes, and worker nodes | kubelet
+TCP | 10254 | | the node itself (local traffic, not across nodes) | Ingress controller livenessProbe/readinessProbe
+
+The ports that need to be opened for each node depend on the node's Kubernetes role: etcd, controlplane, or worker. If you installed Rancher on a Kubernetes cluster that doesn't have all three roles on each node, refer to the [port requirements for the Rancher Kubernetes Engine (RKE).]({{}}/rke/latest/en/os/#ports) The RKE docs show a breakdown of the port requirements for each role.
+{{% /tab %}}
+{{% tab "Single Node Port Requirements" %}}
+### Ports for Communication with Downstream Clusters
+
+To communicate with downstream clusters, Rancher requires different ports to be open depending on the infrastructure you are using.
+
+For example, if you are deploying Rancher on nodes hosted by an infrastructure provider, port `22` must be open for SSH.
+
+The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning).
+
Port Requirements for the Rancher Management Plane
+ +![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) + +The following tables break down the port requirements for inbound and outbound traffic: + +**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). + + +
Inbound Rules for Rancher Nodes
+
+| Protocol | Port | Source | Description |
+| -------- | ---- | ------ | ----------- |
+| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used |
+| TCP | 443 | etcd nodes • controlplane nodes • worker nodes • hosted/imported Kubernetes • any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl |
+
+
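For context, in a Docker installation the inbound ports above map directly to the ports published when the `rancher/rancher` container is started. A minimal example with the default self-signed certificate (shown elsewhere in these docs) publishes ports 80 and 443 on the host:

```
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  rancher/rancher:latest
```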
Outbound Rules for Rancher Nodes
+ +| Protocol | Port | Source | Description | +| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | `35.160.43.145/32`, `35.167.242.46/32`, `52.33.59.17/32` | git.rancher.io (catalogs) | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). {{% /tab %}} {{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md b/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md new file mode 100644 index 00000000000..ac20da0afe8 --- /dev/null +++ b/content/rancher/v2.x/en/installation/requirements/installing-docker/_index.md @@ -0,0 +1,18 @@ +--- +title: Installing Docker +weight: 1 +--- + +Docker is required to be installed on any node that runs the Rancher server. + +There are a couple of options for installing Docker. One option is to refer to the [official Docker documentation](https://docs.docker.com/install/) about how to install Docker on Linux. The steps will vary based on the Linux distribution. + +Another option is to use one of Rancher's Docker installation scripts, which are available for most recent versions of Docker. + +For example, this command could be used to install Docker 18.09 on Ubuntu: + +``` +curl https://releases.rancher.com/install-docker/18.09.sh | sh +``` + +To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher's Docker installation scripts. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/references/_index.md b/content/rancher/v2.x/en/installation/requirements/ports/_index.md similarity index 72% rename from content/rancher/v2.x/en/installation/references/_index.md rename to content/rancher/v2.x/en/installation/requirements/ports/_index.md index 9ccafe15b9c..7a2a7ec8dce 100644 --- a/content/rancher/v2.x/en/installation/references/_index.md +++ b/content/rancher/v2.x/en/installation/requirements/ports/_index.md @@ -1,21 +1,22 @@ --- title: Port Requirements +description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes weight: 300 -aliases: - - /rancher/v2.x/en/hosts/amazon/#required-ports-for-rancher-to-work/ --- -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and Kubernetes cluster nodes. +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. ## Rancher Nodes -The following table lists the ports that need to be open to and from nodes that are running the Rancher server container for [single node installs]({{< baseurl >}}/rancher/v2.x/en/installation/single-node-install/) or pods for [high availability installs]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install/). 
+The following table lists the ports that need to be open to and from nodes that are running the Rancher server container for [Docker installs]({{}}/rancher/v2.x/en/installation/single-node-install/) or pods for [installing Rancher on Kubernetes]({{}}/rancher/v2.x/en/installation/k8s-install/). {{< ports-rancher-nodes >}} -## Kubernetes Cluster Nodes +**Note** Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). -The ports required to be open for cluster nodes changes depending on how the cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster creation options]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options). +## Downstream Kubernetes Cluster Nodes + +The ports required to be open for cluster nodes changes depending on how the cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options). >**Tip:** > @@ -25,7 +26,7 @@ The ports required to be open for cluster nodes changes depending on how the clu {{% tab "Node Pools" %}} -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). >**Note:** >The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. @@ -36,7 +37,7 @@ The following table depicts the port requirements for [Rancher Launched Kubernet {{% tab "Custom Nodes" %}} -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). {{< ports-custom-nodes >}} @@ -44,7 +45,7 @@ The following table depicts the port requirements for [Rancher Launched Kubernet {{% tab "Hosted Clusters" %}} -The following table depicts the port requirements for [hosted clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters). +The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters). {{< ports-imported-hosted >}} @@ -52,7 +53,7 @@ The following table depicts the port requirements for [hosted clusters]({{< base {{% tab "Imported Clusters" %}} -The following table depicts the port requirements for [imported clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). +The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). 
{{< ports-imported-hosted >}} @@ -98,7 +99,7 @@ In these cases, you have to explicitly allow this traffic in your host firewall, ### Rancher AWS EC2 security group -When using the [AWS EC2 node driver]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. +When using the [AWS EC2 node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. | Type | Protocol | Port Range | Source/Destination | Rule Type | |-----------------|:--------:|:-----------:|------------------------|:---------:| @@ -112,6 +113,6 @@ When using the [AWS EC2 node driver]({{< baseurl >}}/rancher/v2.x/en/cluster-pro | Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | | Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | | Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 30000-32767 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 30000-32767 | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | | All traffic | All | All | 0.0.0.0/0 | Outbound | diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md deleted file mode 100644 index 2b5a412831a..00000000000 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Choosing a Version -weight: 230 ---- - -## Single Node Installs - -When performing [single-node installs]({{< baseurl >}}/rancher/v2.x/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. - -### Server Tags - -Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. - -Tag | Description --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- -`rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. -`rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. -`rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. - - - - ->**Notes:** -> ->- The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. ->- Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). 
-> -> _Caveat:_ Alpha releases cannot be upgraded to or from any other release. - - -## High Availability Installs - -When installing, upgrading, or rolling back Rancher Server in a [high availability configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. - -### Helm Chart Repositories - -Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a single node installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. - -Type | Command to Add the Repo | Description of the Repo ------------|-----|------------- -rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. -rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. -rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. - -
-Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). - -> **Note:** The introduction of the `rancher-latest` and `rancher-stable` Helm Chart repositories was introduced after Rancher v2.1.0, so the `rancher-stable` repository contains some Rancher versions that were never marked as `rancher/rancher:stable`. The versions of Rancher that were tagged as `rancher/rancher:stable` prior to v2.1.0 are v2.0.4, v2.0.6, v2.0.8. Post v2.1.0, all charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. - -### Helm Chart Versions - -Rancher Helm chart versions match the Rancher version (i.e `appVersion`). - -For the Rancher v2.1.x versions, there were some Helm charts, that were using a version that was a build number, i.e. `yyyy.mm.`. These charts have been replaced with the equivalent Rancher version and are no longer available. - -### Switching to a Different Helm Chart Repository - -After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. - ->**Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. - -{{< release-channel >}} - -1. List the current Helm chart repositories. - - ``` - helm repo list - - NAME URL - stable https://kubernetes-charts.storage.googleapis.com - rancher- https://releases.rancher.com/server-charts/ - ``` - -2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. - - ``` - helm repo remove rancher- - ``` - -3. Add the Helm chart repository that you want to start installing Rancher from. - - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -4. Continue to follow the steps to [upgrade Rancher]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/) from the new Helm chart repository. diff --git a/content/rancher/v2.x/en/installation/single-node/_index.md b/content/rancher/v2.x/en/installation/single-node/_index.md deleted file mode 100644 index 2b0a0f5e51a..00000000000 --- a/content/rancher/v2.x/en/installation/single-node/_index.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: Single Node Install -weight: 250 -aliases: - - /rancher/v2.x/en/installation/single-node-install/ ---- -For development and testing environments, we recommend installing Rancher by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. - ->**Want to use an external load balancer?** -> See [Single Node Install with an External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/single-node-install-external-lb) instead. - -## 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements) to launch your {{< product >}} Server. - -## 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - ->**Do you want to...** -> ->- Use a proxy? 
See [HTTP Proxy Configuration]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/proxy/) ->- Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{< baseurl >}}/rancher/v2.x/en/admin-settings/custom-ca-root-certificate/) ->- Complete an Air Gap Installation? See [Air Gap: Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/) ->- Record all transactions with the Rancher API? See [API Auditing](#api-audit-log) -> - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the minimum installation command below. - - - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest - - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - ->**Prerequisites:** ->Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> ->- The certificate files must be in [PEM format](#pem). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). - -After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -- Replace `` with the directory path to your certificate file. -- Replace ``,``, and `` with your certificate names. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest -``` -{{% /accordion %}} -{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} - -In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - ->**Prerequisite:** The certificate files must be in [PEM format](#pem). - -After obtaining your certificate, run the Docker command below. - -- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - - - Replace `` with the directory path to your certificate file. - - Replace `` and `` with your certificate names. - -- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. 
- -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - rancher/rancher:latest --no-cacerts -``` -{{% /accordion %}} -{{% accordion id="option-d" label="Option D-Let's Encrypt Certificate" %}} - -For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. - ->**Prerequisites:** -> ->- Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. ->- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). ->- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - - -After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. Replace `` with your domain. - - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest \ - --acme-domain - ->**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). -{{% /accordion %}} - -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restoration]({{< baseurl >}}/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/). - -
- -## Advanced Options - -When installing Rancher, there are several [advanced options]({{< baseurl >}}/rancher/v2.x/en/installation/options/) that can be enabled. - -### Custom CA Certificate - -If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. - -Use the command example to start a Rancher container with your private CA certificates mounted. - -- The volume option (`-v`) should specify the host directory containing the CA root certificates. -- The `e` flag in combination with `SSL_CERT_DIR` declares an environment variable that specifies the mounted CA root certificates directory location inside the container. - - Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. - - Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. - -The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /host/certs:/container/certs \ - -e SSL_CERT_DIR="/container/certs" \ - rancher/rancher:latest -``` - -### API Audit Log - -The API Audit Log records all the user and system transactions made through Rancher server. - -The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. - -See [API Audit Log]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) for more information and options. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /var/log/rancher/auditlog:/var/log/auditlog \ - -e AUDIT_LEVEL=1 \ - rancher/rancher:latest -``` - -### TLS settings - -_Available as of v2.1.7_ - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_TLS_MIN_VERSION="1.0" \ - rancher/rancher:latest -``` - -See [TLS settings]({{< baseurl >}}/rancher/v2.x/en/admin-settings/tls-settings) for more information and options. - -### Air Gap - -If you are visiting this page to complete an air gap installation, you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -{{< persistentdata >}} - -### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. - -If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. - -Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. 
- -To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: - -``` -docker run -d --restart=unless-stopped \ - -p 8080:80 -p 8443:443 \ - rancher/rancher:latest -``` - -## FAQ and Troubleshooting - -{{< ssl_faq_single >}} diff --git a/content/rancher/v2.x/en/k8s-in-rancher/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/_index.md index 1d79b6056fe..5b112b5725e 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/_index.md @@ -1,5 +1,5 @@ --- -title: Working in Projects +title: Kubernetes Resources, Registries and Pipelines weight: 3000 aliases: - /rancher/v2.x/en/concepts/ @@ -56,7 +56,7 @@ For more information, see [Service Discovery]({{< baseurl >}}/rancher/v2.x/en/k8 ## Pipelines -After your project has been [configured to a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines/#version-control-providers), you can add the repositories and start configuring a pipeline for each repository. +After your project has been [configured to a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), you can add the repositories and start configuring a pipeline for each repository. For more information, see [Pipelines]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md index be9c46a198e..e4c3b501564 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/certificates/_index.md @@ -1,5 +1,6 @@ --- -title: SSL Certificates +title: Encrypting HTTP Communication +description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments weight: 3060 aliases: - /rancher/v2.x/en/tasks/projects/add-ssl-certificates/ @@ -13,7 +14,7 @@ Add SSL certificates to either projects, namespaces, or both. A project scoped c 1. From the **Global** view, select the project where you want to deploy your ingress. -1. From the main menu, select **Resources > Certificates**. Click **Add Certificate**. +1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. (For Rancher prior to v2.3, click **Resources > Certificates.**) 1. Enter a **Name** for the certificate. @@ -31,13 +32,13 @@ Add SSL certificates to either projects, namespaces, or both. A project scoped c 1. From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - Certifcate files end with an extension of `.crt`. + Certificate files end with an extension of `.crt`. **Result:** Your certificate is added to the project or namespace. You can now add it to deployments. - If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. - If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. -- Your certificate is added to the **Resources > Certificates** view. +- Your certificate is added to the **Resources > Secrets > Certificates** view. 
(For Rancher prior to v2.3, it is added to **Resources > Certificates.**) ## What's Next? diff --git a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md index 4831c0d9b86..faf95b4b847 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md @@ -34,7 +34,7 @@ ConfigMaps store general configuration information for an application, such as c > >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. > - > ![Bulk Key Value Pair Copy/Paste]({{< baseurl >}}/img/rancher/bulk-key-values.gif) + > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} **Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md index ea95ace61d8..2301619cd7a 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md @@ -1,5 +1,6 @@ --- -title: Horizontal Pod Autoscaler +title: The Horizontal Pod Autoscaler +description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment weight: 3026 --- @@ -18,8 +19,8 @@ The way that you manage HPAs is different based on your version of the Kubernete HPAs are also managed differently based on your version of Rancher: -- **For Rancher Prior to v2.3.0-alpha5:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). -- **For Rancher v2.3.0-alpha5+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher Prior to v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. 
For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). You might have additional HPA installation steps if you are using an older version of Rancher: @@ -28,7 +29,7 @@ You might have additional HPA installation steps if you are using an older versi ## Testing HPAs with a Service Deployment -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project's **HPA** tab. For more information, refer to [Get HPA Metrics and Status]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). +In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] -({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). \ No newline at end of file +({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md index de2393a8370..5a3af016138 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md @@ -3,7 +3,7 @@ title: Managing HPAs with the Rancher UI weight: 3028 --- -_Available as of v2.3.0-alpha5_ +_Available as of v2.3.0_ The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. @@ -13,7 +13,7 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. From the **Global** view, open the project that you want to deploy a HPA to. -1. Select **Workloads** in the navigation bar and then select the **HPA** tab. +1. Click **Resources > HPA.** 1. Click **Add HPA.** @@ -29,13 +29,13 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. Click **Create** to create the HPA. -> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Workloads > HPA view. +> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. ## Get HPA Metrics and Status 1. From the **Global** view, open the project with the HPAs you want to look at. -1. Select **Workloads** in the navigation bar and then select the **HPA** tab. The **HPA** tab shows the number of current replicas. +1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. 1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. @@ -44,12 +44,12 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. From the **Global** view, open the project that you want to delete an HPA from. -1. Select **Workloads** in the navigation bar and then select the **HPA** tab. +1. 
Click **Resources > HPA.** 1. Find the HPA which you would like to delete. 1. Click **Ellipsis (...) > Delete**. -1. Click **Delete** to confim. +1. Click **Delete** to confirm. > **Result:** The HPA is deleted from the current cluster. \ No newline at end of file diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md index 70e11daac47..cb49344658d 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md @@ -60,7 +60,7 @@ spec: selector: app: hello-world ``` -{{% /accordion %}} +{{% /accordion %}} 1. Deploy it to your cluster. @@ -222,14 +222,14 @@ Use your load testing tool to scale up to two pods based on CPU Usage. # kubectl get pods ``` You should receive output similar to what follows: - ``` + ``` NAME READY STATUS RESTARTS AGE hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h ``` {{% /accordion %}} {{% accordion id="observe-upscale-3-pods-cpu-cooldown" label="Upscale to 3 pods: CPU Usage Up to Target" %}} -Use your load testing tool to upspace to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. +Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. 1. Enter the following command. ``` @@ -312,7 +312,7 @@ Use your load testing to scale down to 1 pod when all metrics are below target f Use your load testing tool to upscale two pods based on CPU usage. 1. Enter the following command. - ``` + ``` # kubectl describe hpa ``` You should receive output similar to what follows. @@ -345,7 +345,7 @@ Use your load testing tool to upscale two pods based on CPU usage. # kubectl get pods ``` You should receive output similar to what follows. - ``` + ``` NAME READY STATUS RESTARTS AGE hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s hello-world-54764dfbf8-q6l82 1/1 Running 0 6h @@ -387,7 +387,7 @@ Use your load testing tool to scale up to three pods when the cpu_system usage l 1. Enter the following command to confirm three pods are running. ``` # kubectl get pods - ``` + ``` You should receive output similar to what follows: ``` # kubectl get pods @@ -443,7 +443,7 @@ Use your load testing tool to upscale to four pods based on CPU usage. `horizont hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s hello-world-54764dfbf8-q6l82 1/1 Running 0 6h ``` -{{% /accordion %}} +{{% /accordion %}} {{% accordion id="custom-metrics-observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. @@ -484,8 +484,8 @@ Use your load testing tool to scale down to one pod when all metrics below targe # kubectl get pods ``` You should receive output similar to what follows. 
- ``` + ``` NAME READY STATUS RESTARTS AGE hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` + ``` {{% /accordion %}} \ No newline at end of file diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md index c2821d5c62a..096c69a6c17 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md @@ -1,15 +1,16 @@ --- -title: Load Balancing and Ingresses +title: Set Up Load Balancer and Ingress Controller within Rancher +description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers weight: 3040 --- -Within Rancher, you can setup load balancers and ingress controllers to redirect service requests. +Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. ## Load Balancers After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. +If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. Rancher supports two types of load balancers: @@ -33,18 +34,28 @@ Load Balancers have a couple of limitations you should be aware of: ## Ingress -As mentioned in the limitations above, using a load balancer per service can be expensive. You can get around this issue using an ingress. +As mentioned in the limitations above, the disadvantages of using a load balancer are: -Ingress is a set or rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. +- Load Balancers can only handle one IP address per service. +- If you run multiple services in your cluster, you must have a load balancer for each service. +- It can be expensive to have a load balancer for every service. -Your load balancer can either reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launcher clusters are powered by [Nginx](https://www.nginx.com/). +In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. + +Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. + +Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. 
When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. + +Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. + +Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launcher clusters are powered by [Nginx](https://www.nginx.com/). Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. >**Using Rancher in a High Availability Configuration?** > ->Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global load balancer for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. +>Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. -- For more information on how to setup ingress in Rancher, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). +- For more information on how to set up ingress in Rancher, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). - For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) - When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md index efa6b5d13f1..d90fc336f02 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md @@ -1,5 +1,6 @@ --- -title: Ingress +title: Adding Ingresses to Your Project +description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project weight: 3042 aliases: - /rancher/v2.x/en/tasks/workloads/add-ingress/ @@ -9,7 +10,7 @@ Ingress can be added for workloads to provide load balancing, SSL termination an 1. From the **Global** view, open the project that you want to add ingress to. -1. Select the **Load Balancing** tab. Then click **Add Ingress**. +1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions prior to v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. 1. 
Enter a **Name** for the ingress. @@ -72,7 +73,7 @@ Ingress can be added for workloads to provide load balancing, SSL termination an 1. Enter the **Host** using encrypted communication. - 1. To add additional hosts that use the certitificate, click **Add Hosts**. + 1. To add additional hosts that use the certificate, click **Add Hosts**. 1. **Optional:** Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md index 77afd316aa7..9edfc95f878 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md @@ -1,5 +1,6 @@ --- -title: Load Balancers +title: "Layer 4 and Layer 7 Load Balancing" +description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" weight: 3041 aliases: - /rancher/v2.x/en/concepts/load-balancing/ @@ -8,7 +9,11 @@ Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer ## Layer-4 Load Balancer -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. Layer-4 load balancer is supported by the underlying cloud provider. As a result, when you deploy RKE clusters on bare metal servers and vSphere clusters, layer-4 load balancer is not supported. +Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. + +Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. + +> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. ### Support for Layer-4 Load Balancing @@ -16,13 +21,16 @@ Support for layer-4 load balancer varies based on the underlying cloud provider. Cluster Deployment | Layer-4 Load Balancer Support ----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GCE cloud provider -Azure AKS | Supported by Azure cloud provider -RKE on EC2 | Supported by AWS cloud provider -RKE on DigitalOcean | Not Supported -RKE on vSphere | Not Supported -RKE on Custom Hosts
(e.g. bare-metal servers) | Not Supported +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GCE cloud provider +Azure AKS | Supported by Azure cloud provider +RKE on EC2 | Supported by AWS cloud provider +RKE on DigitalOcean | Limited NGINX or third-party Ingress* +RKE on vSphere | Limited NGINX or third-party Ingress* +RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* +Third-party MetalLB | Limited NGINX or third-party Ingress* + +\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) ## Layer-7 Load Balancer @@ -59,8 +67,8 @@ The benefit of using xip.io is that you obtain a working entrypoint URL immediat #### Tutorials -- [High Availability Installation with External Load Balancer (HTTPS/Layer 7)]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install-external-lb) -- [High Availability Installation with External Load Balancer (TCP/Layer 4)]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install) -- [Single Node Installation with External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/single-node-install-external-lb) +- [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install-external-lb) +- [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{< baseurl >}}/rancher/v2.x/en/installation/ha-server-install) +- [Docker Installation with External Load Balancer]({{< baseurl >}}/rancher/v2.x/en/installation/single-node-install-external-lb) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md index edf934e3ab4..e39437f23a7 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md @@ -11,7 +11,7 @@ aliases: >- Pipelines are new and improved for Rancher v2.1! Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. >- Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). -Before setting up any pipelines, review the [pipeline overview]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines/) and ensure that the project has [configured authentication to your version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines/#version-control-providers), e.g. GitHub, GitLab, Bitucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example/) to view some common pipeline deployments. +Before setting up any pipelines, review the [pipeline overview]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/) and ensure that the project has [configured authentication to your version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/#version-control-providers), e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/example/) to view some common pipeline deployments. If you can access a project, you can enable repositories to start building pipelines. Only an [administrator]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can authorize version control providers. 
@@ -41,7 +41,7 @@ After the version control provider is authorized, you are automatically re-direc 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Click on **Configure Repositories**. @@ -59,7 +59,7 @@ Now that repositories are added to your project, you can start configuring the p 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the repository that you want to set up a pipeline for. Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Throughout the next couple of steps, we'll provide the options of how to do pipeline configuration through the UI or the YAML file. @@ -99,7 +99,7 @@ If you haven't added any stages, click **Configure pipeline for this branch** to {{% /tab %}} {{% tab "By YAML" %}}
-For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the [advanced options](#advanced-options) to get all thhe details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. +For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the [advanced options](#advanced-options) to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. ```yaml # example @@ -145,7 +145,7 @@ _Available as of v2.2.0_ 1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. -1. If you don't have any existing [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. +1. If you don't have any existing [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. @@ -163,7 +163,7 @@ In the `notification` section, you will provide the following information: * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. * **Condition:** Select which conditions of when you want the notification to be sent. -* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. +* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. ```yaml # Example @@ -181,7 +181,7 @@ notification: notifier: "c-wdcsr:n-c9pg7" - recipient: "test@example.com" notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent + # Select which statuses you want the notification to be sent condition: ["Failed", "Success", "Changed"] # Ability to override the default message (Optional) message: "my-message" @@ -231,9 +231,9 @@ timeout: 30 ## Running your Pipelines -Run your pipeline for the first time. From the **Pipeline** tab, find your pipeline and select the vertical **Ellipsis (...) > Run**. +Run your pipeline for the first time. 
From the project view in Rancher, go to **Resources > Pipelines.** (In versions prior to v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **Ellipsis (...) > Run**. -During this initial run, your pipeline is tested, and the following [pipeline components]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines/#how-pipelines-work) are deployed to your project as workloads in a new namespace dedicated to the pipeline: +During this initial run, your pipeline is tested, and the following [pipeline components]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines/#how-pipelines-work) are deployed to your project as workloads in a new namespace dedicated to the pipeline: - `docker-registry` - `jenkins` @@ -257,7 +257,7 @@ Available Events: 1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the repository that you want to modify the event triggers. Select the vertical **Ellipsis (...) > Setting**. @@ -309,7 +309,7 @@ stages:
{{% /tab %}} -{{% /tabs %}} +{{% /tabs %}} ### Build and Publish Images @@ -413,7 +413,7 @@ Under the `steps` section, add a step with `publishCatalogConfig`. You will prov * GitBranch: The git branch of the chart repository that the template will be published to. * GitAuthor: The author name used in the commit message. * GitEmail: The author email used in the commit message. -* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. If you publish via HTTP(S) protolcol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. +* Credentials: You should provide Git credentials by referencing secrets in the dedicated pipeline namespace. If you publish via the SSH protocol, inject your deploy key into the `DEPLOY_KEY` environment variable. If you publish via the HTTP(S) protocol, inject your username and password into the `USERNAME` and `PASSWORD` environment variables. ```yaml # example @@ -467,7 +467,7 @@ stages:
{{% /tab %}} -{{% /tabs %}} +{{% /tabs %}} ### Deploy Catalog App @@ -553,7 +553,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. 1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the repository for which you want to manage trigger rules, select the vertical **Ellipsis (...) > Edit Config**. @@ -563,7 +563,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. - 1. **Optional:** Add more branches that trigger a build. + 1. **Optional:** Add more branches that trigger a build. 1. Click **Done.** @@ -571,7 +571,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. {{% tab "Stage Trigger" %}} 1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the repository for which you want to manage trigger rules, select the vertical **Ellipsis (...) > Edit Config**. @@ -581,7 +581,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. 1. In the **Trigger Rules** section, configure rules to run or skip the stage. - 1. Click **Add Rule**. + 1. Click **Add Rule**. 1. Choose the **Type** that triggers the stage and enter a value. @@ -596,7 +596,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. {{% tab "Step Trigger" %}} 1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the repository for which you want to manage trigger rules, select the vertical **Ellipsis (...) > Edit Config**. @@ -606,7 +606,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. 1. In the **Trigger Rules** section, configure rules to run or skip the step. - 1. Click **Add Rule**. + 1. Click **Add Rule**. 1. Choose the **Type** that triggers the step and enter a value. @@ -654,13 +654,13 @@ When configuring a pipeline, certain [step types](#step-types) allow you to use {{% tab "By UI" %}} 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the pipeline for which you want to edit build triggers, select **Ellipsis (...) > Edit Config**. 1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. -1. Click **Show advanced options**. +1. Click **Show advanced options**. 1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. @@ -703,13 +703,13 @@ Create a secret in the same project as your pipeline, or explicitly in the names {{% tab "By UI" %}} 1. 
From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the pipeline for which you want to edit build triggers, select **Ellipsis (...) > Edit Config**. 1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. -1. Click **Show advanced options**. +1. Click **Show advanced options**. 1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md index 7c2136b481f..00ddc2f207f 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md @@ -11,7 +11,7 @@ Rancher ships with several example repositories that you can use to familiarize - Maven - php -> **Note**: The example repositories are only available if you have not [configured a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines). +> **Note:** The example repositories are only available if you have not [configured a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines). ## Configure Repositories @@ -19,7 +19,7 @@ By default, the example pipeline repositories are disabled. Enable one (or more) 1. From the **Global** view, navigate to the project that you want to test out pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Click **Configure Repositories**. @@ -45,7 +45,7 @@ After enabling an example repository, review the pipeline to see how it is set u 1. From the **Global** view, navigate to the project that you want to test out pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the example repository, select the vertical **Ellipsis (...)**. There are two ways to view the pipeline: * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. @@ -57,7 +57,7 @@ After enabling an example repository, run the pipeline to see how it works. 1. From the **Global** view, navigate to the project that you want to test out pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the example repository, select the vertical **Ellipsis (...) > Run**. @@ -67,4 +67,4 @@ After enabling an example repository, run the pipeline to see how it works. ## What's Next? -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines), [enable a repository](#configure-repositories) and finally [configure your pipeline]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). 
+For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/pipelines), [enable a repository](#configure-repositories) and finally [configure your pipeline]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md index 2134780953a..0b756ed4de9 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md @@ -7,7 +7,7 @@ aliases: Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. -In the [pipeline configuration docs](), we provide examples of each available feature within pipelines. Here is a full example for those who want to jump rigt in. +In the [pipeline configuration docs](), we provide examples of each available feature within pipelines. Here is a full example for those who want to jump right in. ```yaml # example diff --git a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md index 7a2b342191c..b24f7e97cff 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md @@ -1,5 +1,6 @@ --- -title: Registries +title: Kubernetes Registry and Docker Registry +description: Learn about the Docker registry and Kubernetes registry, their use cases and how to use a private registry with the Rancher UI weight: 3063 aliases: - /rancher/v2.x/en/tasks/projects/add-registries/ @@ -15,11 +16,15 @@ Deployments use the Kubernetes registry secret to authenticate with a private Do Currently, deployments pull the private registry credentials automatically only if the workload is created in the Rancher UI and not when it is created via kubectl. +# Creating a Registry + >**Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. 1. From the **Global** view, select the project containing the namespace(s) where you want to add a registry. -1. From the main menu, select **Resources > Registries**. Click **Add Registry**. +1. From the main menu, click **Resources > Secrets > Registry Credentials.** (For Rancher prior to v2.3, click **Resources > Registries.)** + +1. Click **Add Registry.** 1. Enter a **Name** for the registry. @@ -37,7 +42,7 @@ Currently, deployments pull the private registry credentials automatically only - You can view the secret in the Rancher UI from the **Resources > Registries** view. - Any workload that you create in the Rancher UI will have the credentials to access the registry if the workload is within the registry's scope. -## Using a Private Registry +# Using a Private Registry You can deploy a workload with an image from a private registry through the Rancher UI, or with `kubectl`. @@ -46,7 +51,7 @@ You can deploy a workload with an image from a private registry through the Ranc To deploy a workload with an image from your private registry, 1. Go to the project view, -1. Go to the **Workloads** tab. +1. Click **Resources > Workloads.** In versions prior to v2.3.0, go to the **Workloads** tab. 1. Click **Deploy.** 1. Enter a unique name for the workload and choose a namespace. 1. 
In the **Docker Image** field, enter the URL of the path to the Docker image in your private registry. For example, if your private registry is on Quay.io, you could use `quay.io//`. @@ -110,4 +115,4 @@ The result should look like this: 10s Normal Pulled Pod Successfully pulled image "quay.io//" ``` -For more information, refer to the Kubernetes documentation on [creating a pod that uses your secret.](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) \ No newline at end of file +For more information, refer to the Kubernetes documentation on [creating a pod that uses your secret.](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md index 5b958d9dcb5..b6f31611d9e 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md @@ -7,9 +7,13 @@ aliases: [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. +> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.]({{}}/rancher/v2.x/en/k8s-in-rancher/registries) + When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. ->**Note:** Any update to an active secrets won't automatically update the pods that are using it. Restart those pods to have them use the new secret. +Any update to an active secret won't automatically update the pods that are using it. Restart those pods to have them use the new secret. + +# Creating Secrets When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. @@ -25,15 +29,17 @@ When creating a secret, you can make it available for any deployment within a pr 5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. - >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. - > - > ![Bulk Key Value Pair Copy/Paste]({{< baseurl >}}/img/rancher/bulk-key-values.gif) + >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. + > + > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} 1. Click **Save**. **Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. -## What's Next? +Any update to an active secret won't automatically update the pods that are using it. Restart those pods to have them use the new secret. + +# What's Next? Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy.
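To make the key-value structure described above concrete, here is a minimal sketch of an equivalent Kubernetes Secret and a pod that consumes one of its keys as an environment variable. The secret name, key, value, and image are hypothetical placeholders:

```yaml
# Minimal Secret sketch -- name, key, value, and image are hypothetical
apiVersion: v1
kind: Secret
metadata:
  name: example-credentials
  namespace: default
type: Opaque
stringData:                     # stringData accepts plain text; Kubernetes stores it base64-encoded
  db-password: examplepassword
---
apiVersion: v1
kind: Pod
metadata:
  name: secret-demo
  namespace: default
spec:
  containers:
  - name: app
    image: nginx
    env:
    - name: DB_PASSWORD         # exposed inside the container as an environment variable
      valueFrom:
        secretKeyRef:
          name: example-credentials
          key: db-password
```

As noted above, if the secret is updated later, pods that already reference it keep the old value until they are restarted.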
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md index 39308578377..6b0b289ef04 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md @@ -12,7 +12,7 @@ However, you also have the option of creating additional Service Discovery recor 1. From the **Global** view, open the project that you want to add a DNS record to. -1. Select the **Service Discovery** tab. Then click **Add Record**. +1. Click **Resources** in the main navigation bar. Click the **Service Discovery** tab. (In versions prior to v2.3.0, just click the **Service Discovery** tab.) Then click **Add Record**. 1. Enter a **Name** for the DNS record. This name is used for DNS resolution. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md index f5da74a4a81..617929af284 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/_index.md @@ -1,5 +1,6 @@ --- -title: Workloads +title: "Kubernetes Workloads and Pods" +description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" weight: 3025 aliases: - /rancher/v2.x/en/concepts/workloads/ diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md index e5c6c4f503e..abe6b1c5fe7 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md @@ -8,7 +8,7 @@ A _sidecar_ is a container that extends or enhances the main container in a pod. 1. From the **Global** view, open the project running the workload you want to add a sidecar to. -1. Select the **Workloads** tab. +1. Click **Resources > Workloads.** In versions prior to v2.3.0, select the **Workloads** tab. 1. Find the workload that you want to extend. Select **Ellipsis icon (...) > Add a Sidecar**. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md index ab046ce7d06..123c2fd295f 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md @@ -1,5 +1,6 @@ --- title: Deploying Workloads +description: Read this step by step guide for deploying workloads. Deploy a workload to run an application in one or more containers. weight: 3026 aliases: - /rancher/v2.x/en/tasks/workloads/deploy-workloads/ @@ -9,13 +10,13 @@ Deploy a workload to run an application in one or more containers. 1. From the **Global** view, open the project that you want to deploy a workload to. -1. From the **Workloads** view, click **Deploy**. +1. Click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) From the **Workloads** view, click **Deploy**. 1. Enter a **Name** for the workload. 1. Select a [workload type]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** -1.
From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. +1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. 1. Either select an existing [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces), or click **Add to a new namespace** and enter a new namespace. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md index a10bcea6ebd..4be9cd00eaf 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/rollback-workloads/_index.md @@ -9,7 +9,7 @@ Sometimes there is a need to rollback to the previous version of the application 1. From the **Global** view, open the project running the workload you want to rollback. -1. Find the workload that you want to rollback and select **Vertical Elipsis (... ) > Rollback**. +1. Find the workload that you want to rollback and select **Vertical Ellipsis (... ) > Rollback**. 1. Choose the revision that you want to roll back to. Click **Rollback**. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md index c00c15e0a97..5d47c733ed4 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md @@ -8,7 +8,7 @@ When a new version of an application image is released on Docker Hub, you can up 1. From the **Global** view, open the project running the workload you want to upgrade. -1. Find the workload that you want to upgrade and select **Vertical Elipsis (... ) > Edit**. +1. Find the workload that you want to upgrade and select **Vertical Ellipsis (... ) > Edit**. 1. Update the **Docker Image** to the updated version of the application image on Docker Hub. diff --git a/content/rancher/v2.x/en/overview/_index.md b/content/rancher/v2.x/en/overview/_index.md index d07ec95ea39..92c84b5cb81 100644 --- a/content/rancher/v2.x/en/overview/_index.md +++ b/content/rancher/v2.x/en/overview/_index.md @@ -4,11 +4,11 @@ weight: 1 --- Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. -## Run Kubernetes Everywhere +# Run Kubernetes Everywhere Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. 
Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. -## Meet IT requirements +# Meet IT requirements Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: @@ -16,10 +16,52 @@ Rancher supports centralized authentication, access control, and monitoring for - Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. - View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. -## Empower DevOps Teams +# Empower DevOps Teams Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. ![Platform]({{< baseurl >}}/img/rancher/platform.png) + +# Features of the Rancher API Server + +The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: + +### Authorization and Role-Based Access Control + +- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.x/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. +- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.x/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.x/en/admin-settings/pod-security-policies/) policies. + +### Working with Kubernetes + +- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.x/en/cluster-admin/upgrading-kubernetes) +- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.x/en/catalog/) that make it easy to repeatedly deploy applications. +- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.x/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.x/en/k8s-in-rancher/) +- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.x/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects.
+- **Istio:** Our [integration with Istio]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +### Working with Cloud Infrastructure + +- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.x/en/cluster-admin/nodes/) in all clusters. +- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/) in the cloud. + +### Cluster Visibility + +- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. Logging can be set up [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/logging/) +- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. Monitoring can be configured [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/monitoring/) +- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. To help you stay informed of these events, you can configure alerts [at the cluster level]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or [at the project level.]({{}}/rancher/v2.x/en/project-admin/tools/alerts/) + +# Editing Downstream Clusters with Rancher + +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. + +After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.x/en/cluster-admin/editing-clusters/) + +The following table summarizes the options and settings available for each cluster type: + + Rancher Capability | RKE Launched | Hosted Kubernetes Cluster | Imported Cluster + ---------|----------|---------|---------| + Manage member roles | ✓ | ✓ | ✓ + Edit cluster options | ✓ | | + Manage node pools | ✓ | | \ No newline at end of file diff --git a/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md b/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md new file mode 100644 index 00000000000..016f7a8ce62 --- /dev/null +++ b/content/rancher/v2.x/en/overview/architecture-recommendations/_index.md @@ -0,0 +1,91 @@ +--- +title: Architecture Recommendations +weight: 3 +--- + +Kubernetes cluster. 
If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) + +This section covers the following topics: + +- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) +- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) +- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) +- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) +- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) +- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) + +# Separation of Rancher and User Clusters + +A user cluster is a downstream Kubernetes cluster that runs your apps and services. + +If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. + +In Kubernetes Installations of Rancher, the Rancher server cluster should also be separate from the user clusters. + +![Separation of Rancher Server from User Clusters]({{}}/img/rancher/rancher-architecture-separation-of-rancher-server.svg) + +# Why HA is Better for Rancher in Production + +We recommend installing the Rancher server on a three-node Kubernetes cluster for production, primarily because it protects the Rancher server data. The Rancher server stores its data in etcd in both single-node and Kubernetes Installations. + +When Rancher is installed on a single node, if the node goes down, there is no copy of the etcd data available on other nodes and you could lose the data on your Rancher server. + +By contrast, in the high-availability installation, + +- The etcd data is replicated on three nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. +- A load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. Note: See this [example]({{}}/rancher/v2.x/en/installation/k8s-install/create-nodes-lb/nginx/) of how to configure an NGINX server as a basic layer 4 load balancer (TCP). + +# Recommended Load Balancer Configuration for Kubernetes Installations + +We recommend the following configurations for the load balancer and Ingress controllers: + +* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) +* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. +
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
+![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) +Rancher installed on a Kubernetes cluster with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers + +# Environment for Kubernetes Installations + +It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. + +For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. + +It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. + +# Recommended Node Roles for Kubernetes Installations + +We recommend installing Rancher on a Kubernetes cluster in which each node has all three Kubernetes roles: etcd, controlplane, and worker. + +### Comparing Node Roles for the Rancher Server Cluster and User Clusters + +Our recommendation for node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. We recommend that each node in a user cluster should have a single role for stability and scalability. + +![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters]({{}}/img/rancher/rancher-architecture-node-roles.svg) + +Kubernetes only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. + +We recommend that downstream user clusters should have at least: + +- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available +- **Two nodes with only the controlplane role** to make the master component highly available +- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services + +With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No workloads other than Rancher itself should be created on this cluster. + +Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of user clusters.
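As a sketch of what this looks like in practice, the `nodes` section of an RKE `cluster.yml` for such a Rancher server cluster assigns all three roles to each of the three nodes. The addresses and SSH user below are placeholders:

```yaml
# Fragment of an RKE cluster.yml for the Rancher server cluster -- addresses and user are placeholders
nodes:
  - address: 10.0.0.1
    user: ubuntu
    role: [controlplane, worker, etcd]
  - address: 10.0.0.2
    user: ubuntu
    role: [controlplane, worker, etcd]
  - address: 10.0.0.3
    user: ubuntu
    role: [controlplane, worker, etcd]
```

A downstream user cluster, by contrast, would typically list separate groups of nodes with `role: [etcd]`, `role: [controlplane]`, and `role: [worker]`.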
+ +For more best practices for user clusters, refer to the [production checklist]({{}}/rancher/v2.x/en/cluster-provisioning/production) or our [best practices guide.]({{}}/rancher/v2.x/en/best-practices/management/#tips-for-scaling-and-reliability) + +# Architecture for an Authorized Cluster Endpoint + +If you are using an [authorized cluster endpoint,]({{}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. + +If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files]({{}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) and [API keys]({{}}/rancher/v2.x/en/user-settings/api-keys/#creating-an-api-key) for more information. \ No newline at end of file diff --git a/content/rancher/v2.x/en/overview/architecture/_index.md b/content/rancher/v2.x/en/overview/architecture/_index.md index 63833ffcd4c..c28ab874aa8 100644 --- a/content/rancher/v2.x/en/overview/architecture/_index.md +++ b/content/rancher/v2.x/en/overview/architecture/_index.md @@ -3,84 +3,173 @@ title: Architecture weight: 1 --- -This section explains how Rancher interacts with the two fundamental technologies Rancher is built on: Docker and Kubernetes. +This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. -## Docker +For information on the different ways that Rancher can be installed, refer to the [overview of installation options.]({{}}/rancher/v2.x/en/installation/#overview-of-installation-options) -Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also setup private Docker registries. Docker is primarily used to manage containers on individual nodes. +For a list of main features of the Rancher API server, refer to the [overview section.]({{}}/rancher/v2.x/en/overview/#features-of-the-rancher-api-server) ->**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. +For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.]({{}}/rancher/v2.x/en/overview/architecture-recommendations) -## Kubernetes +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.x/en/overview/concepts) page. -Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. +This section covers the following topics: -A Kubernetes cluster consists of multiple nodes. 
+- [Rancher server architecture](#rancher-server-architecture) +- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) + - [The authentication proxy](#1-the-authentication-proxy) + - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) + - [Node agents](#3-node-agents) + - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) +- [Important files](#important-files) +- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) +- [Rancher server components and source code](#rancher-server-components-and-source-code) -- **etcd database** +# Rancher Server Architecture - Although you can run etcd on just one node, it typically takes 3, 5 or more nodes to create an HA configuration. +The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. -- **Master nodes** +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). - Master nodes are stateless and are used to run the API server, scheduler, and controllers. +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-in-rancher) for running your workloads. -- **Worker nodes** +The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy: - The application workload runs on worker nodes. +
Managing Kubernetes Clusters through Rancher's Authentication Proxy
-## Rancher +![Architecture]({{< baseurl >}}/img/rancher/rancher-architecture-rancher-api-server.svg) -The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. +You can install Rancher on a single node, or on a high-availability Kubernetes cluster. -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). +A high-availability Kubernetes installation is recommended for production. A Docker installation may be used for development and testing purposes, but there is no migration path from a single-node to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. -![Architecture]({{< baseurl >}}/img/rancher/rancher-architecture.svg) +The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. -In this section we describe the functionalities of each Rancher server components. +# Communicating with Downstream User Clusters -#### Rancher API Server +This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. -Rancher API server is built on top of an embedded Kubernetes API server and etcd database. It implements the following functionalities: +The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. -- **User Management** +
Communicating with Downstream Clusters
- Rancher API server manages user identities that correspond to external authentication providers like Active Directory or GitHub. +![Rancher Components]({{}}/img/rancher/rancher-architecture-cluster-controller.svg) -- **Authorization** +The following descriptions correspond to the numbers in the diagram above: - Rancher API server manages access control and security policies. +1. [The Authentication Proxy](#1-the-authentication-proxy) +2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) +3. [Node Agents](#3-node-agents) +4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) -- **Projects** +### 1. The Authentication Proxy - A _project_ is a group of multiple namespaces and access control policies within a cluster. +In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see +the pods. Bob is authenticated through Rancher's authentication proxy. -- **Nodes** +The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. - Rancher API server tracks identities of all the nodes in all clusters. +Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. -#### Cluster Controller and Agents +By default, Rancher generates a [kubeconfig file]({{}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. -The cluster controller and cluster agents implement the business logic required to manage Kubernetes clusters. +### 2. Cluster Controllers and Cluster Agents -- The _cluster controller_ implements the logic required for the global Rancher install. It performs the following actions: +Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. - - Configuration of access control policies to clusters and projects. +There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: - - Provisioning of clusters by calling: +- Watches for resource changes in the downstream cluster +- Brings the current state of the downstream cluster to the desired state +- Configures access control policies to clusters and projects +- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE - - The required Docker machine drivers. - - Kubernetes engines like RKE and GKE. +By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. +The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. 
It performs the following tasks: -- A separate _cluster agent_ instance implements the logic required for the corresponding cluster. It performs the following activities: +- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters +- Manages workloads, pod creation and deployment within each cluster +- Applies the roles and bindings defined in each cluster's global policies +- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health - - Workload Management, such as pod creation and deployment within each cluster. +### 3. Node Agents - - Application of the roles and bindings defined in each cluster's global policies. +If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. - - Communication between clusters and Rancher Server: events, stats, node info, and health. +The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. It is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. -#### Authentication Proxy +### 4. Authorized Cluster Endpoint -The _authentication proxy_ forwards all Kubernetes API calls. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. Rancher communicates with Kubernetes clusters using a service account. +An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. + +> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. + +There are two main reasons why a user might need the authorized cluster endpoint: + +- To access a downstream user cluster while Rancher is down +- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. + +> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. 
The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. + +With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. + +You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.]({{<baseurl>}}/rancher/v2.x/en/cluster-admin/cluster-access/kubectl) + +# Important Files + +The files mentioned below are needed to maintain, troubleshoot, and upgrade your cluster: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The kubeconfig file for the cluster. This file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. +- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. + +For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file]({{<baseurl>}}/rancher/v2.x/en/cluster-admin/cluster-access/kubeconfig/) documentation. + +# Tools for Provisioning Kubernetes Clusters + +The tools that Rancher uses to provision downstream user clusters depend on the type of cluster that is being provisioned. + +### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider + +Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) + +### Rancher Launched Kubernetes for Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. + +Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) + +### Hosted Kubernetes Providers + +When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) + +### Imported Kubernetes Clusters + +In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster.
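+For the cluster types above that Rancher provisions with RKE, the cluster layout is captured in an RKE cluster configuration file such as the `rancher-cluster.yml` listed under Important Files. The following is only a rough, hypothetical sketch of what such a file can look like; the node addresses, SSH user, and role assignments are placeholders rather than values produced by Rancher:
+
+```yaml
+# Hypothetical rancher-cluster.yml sketch; addresses and SSH user are placeholders.
+nodes:
+  - address: 203.0.113.11
+    user: ubuntu
+    role: [controlplane, etcd, worker]
+  - address: 203.0.113.12
+    user: ubuntu
+    role: [worker]
+
+# Optional recurring etcd snapshots, which can be used when restoring cluster state.
+services:
+  etcd:
+    snapshot: true
+    creation: 6h
+    retention: 24h
+```
+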
+ +# Rancher Server Components and Source Code + +This diagram shows each component that the Rancher server is composed of: + +![Rancher Components]({{<baseurl>}}/img/rancher/rancher-architecture-rancher-components.svg) + +The GitHub repositories for Rancher can be found at the following links: + +- [Main Rancher server repository](https://github.com/rancher/rancher) +- [Rancher UI](https://github.com/rancher/ui) +- [Rancher API UI](https://github.com/rancher/api-ui) +- [Norman,](https://github.com/rancher/norman) Rancher's API framework +- [Types](https://github.com/rancher/types) +- [Rancher CLI](https://github.com/rancher/cli) +- [Catalog applications](https://github.com/rancher/helm) + +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.]({{<baseurl>}}/rancher/v2.x/en/contributing/#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. diff --git a/content/rancher/v2.x/en/overview/concepts/_index.md b/content/rancher/v2.x/en/overview/concepts/_index.md new file mode 100644 index 00000000000..f4fe9fc26f0 --- /dev/null +++ b/content/rancher/v2.x/en/overview/concepts/_index.md @@ -0,0 +1,72 @@ +--- +title: Kubernetes Concepts +weight: 4 +--- + +This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified overview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) + +This section covers the following topics: + +- [About Docker](#about-docker) +- [About Kubernetes](#about-kubernetes) +- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) +- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) + - [etcd Nodes](#etcd-nodes) + - [Controlplane Nodes](#controlplane-nodes) + - [Worker Nodes](#worker-nodes) +- [About Helm](#about-helm) + +# About Docker + +Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. + +>**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. + +# About Kubernetes + +Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health checks, secret management, and configuration management. + +# What is a Kubernetes Cluster? + +A cluster is a group of computers that work together as a single system. + +A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. + +# Roles for Nodes in Kubernetes Clusters + +Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines.
Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. + +A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. + +### etcd Nodes + +Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. + +The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. + +The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. + +Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. + +Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. + +### Controlplane Nodes + +Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although two or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. + +### Worker Nodes + +Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: + +- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. +- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. + +Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads]({{}}/rancher/v2.x/en/k8s-in-rancher/workloads/). + +# About Helm + +For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. + +Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). + +For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/_index.md b/content/rancher/v2.x/en/project-admin/_index.md index 9d024be480f..1fa9df84378 100644 --- a/content/rancher/v2.x/en/project-admin/_index.md +++ b/content/rancher/v2.x/en/project-admin/_index.md @@ -1,6 +1,8 @@ --- title: Project Administration weight: 2500 +aliases: + - /rancher/v2.x/en/project-admin/editing-projects/ --- _Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. 
You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. @@ -18,10 +20,11 @@ You can use projects to perform actions like: - [Assign users access to a group of namespaces]({{< baseurl >}}/rancher/v2.x/en/project-admin/project-members) - Assign users [specific roles in a project]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/) -- [Edit project settings]({{< baseurl >}}/rancher/v2.x/en/project-admin/editing-projects/) - [Set resource quotas]({{< baseurl >}}/rancher/v2.x/en/project-admin/resource-quotas/) - [Manage namespaces]({{< baseurl >}}/rancher/v2.x/en/project-admin/namespaces/) - [Configure tools]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/) +- [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.x/en/project-admin/pipelines) +- [Configure pod security policies]({{}}/rancher/v2.x/en/project-admin/pod-security-policies) ### Authorization diff --git a/content/rancher/v2.x/en/project-admin/editing-projects/_index.md b/content/rancher/v2.x/en/project-admin/editing-projects/_index.md deleted file mode 100644 index 86129458b8f..00000000000 --- a/content/rancher/v2.x/en/project-admin/editing-projects/_index.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Editing Projects -weight: 2510 -aliases: - - /rancher/v2.x/en/tasks/projects/create-project/ - - /rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/ ---- - -After projects are created, there are certain aspects that can be changed later. - -## Adding Members - -Following project creation, you can add users as project members so that they can access its resources. - -1. From the **Global** view, open the project that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the project. - - If external authentication is configured: - - - Rancher returns users from your external authentication source as you type. - - - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. - -1. Assign the user or group **Project** roles. - - [What are Project Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) - - >**Notes:** - > - >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - > - >- For `Custom` roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). 
- -**Result:** The chosen users are added to the project. - -- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. - -## Editing the Pod Security Policy - ->**Note:** These cluster options are only available for [clusters that Rancher has launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -You can always assign a PSP to an existing project if you didn't assign one during creation. - ->**Prerequisites:** -> -> - Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/). -> - Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [Existing Cluster: Adding a Pod Security Policy]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#adding-changing-a-pod-security-policy). - -1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. - -1. From the main menu, select **Projects/Namespaces**. - -3. Find the project that you want to add a PSP to. From that project, select **Vertical Ellipsis (...) > Edit**. - -4. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. - Assigning a PSP to a project will: - - - Override the cluster's default PSP. - - Apply the PSP to the project. - - Apply the PSP to any namespaces you add to the project later. - -5. Click **Save**. - -**Result:** The PSP is applied to the project and any namespaces added to the project. - ->**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked if it complies with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. - -## Editing Resource Quotas - -_Available as of v2.0.1_ - -Edit [resource quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: - -- You want to limit the resources that a project and its namespaces can use. -- You want to scale the resources available to a project up or down when a research quota is already in effect. - -1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the project that you want to add a resource quota to. From that project, select **Ellipsis (...) > Edit**. - -1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. - -1. Select a [Resource Type]({{< baseurl >}}/rancher/v2.x/en/project-admin/resource-quotas/#resource-quota-types). - -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. - - | Field | Description | - | ----------------------- | -------------------------------------------------------------------------------------------------------- | - | Project Limit | The overall resource limit for the project. | - | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. 
| - -1. **Optional:** Add more quotas. - -1. Click **Create**. - -**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, Rancher won't let you save your changes. - - -## Editing Container Default Resource Limit - -_Available as of v2.2.0_ - -Edit [container default resource limit]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#setting-container-default-resource-limit) when: - -- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. -- You want to edit the default container resource limit. - -1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the project that you want to edit the container default resource limit. From that project, select **Ellipsis (...) > Edit**. - -1. Expand **Container Default Resource Limit** and edit the values. diff --git a/content/rancher/v2.x/en/project-admin/istio/_index.md b/content/rancher/v2.x/en/project-admin/istio/_index.md index c2755a94d49..82ef83353cf 100644 --- a/content/rancher/v2.x/en/project-admin/istio/_index.md +++ b/content/rancher/v2.x/en/project-admin/istio/_index.md @@ -1,55 +1,21 @@ --- -title: How to Use Istio in Your Project +title: Istio weight: 3528 --- -_Available as of v2.3.0-alpha5_ +_Available as of v2.3.0_ Using Rancher, you can connect, secure, control, and observe services through integration with [Istio](https://istio.io/), a leading open-source service mesh solution. Istio provides behavioral insights and operational control over the service mesh as a whole, offering a complete solution to satisfy the diverse requirements of microservice applications. -Istio requires each pod in the service mesh to run an Istio compatible sidecar. This section describes how to set up Istio sidecar auto injection in the Rancher UI. For more information on the Istio sidecar, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). +This service mesh provides features that include but are not limited to the following: ->**Prerequisites:** -> ->- [Istio]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/istio/) must be enabled in the cluster. ->- To be a part of an Istio service mesh, pods and services in a Kubernetes cluster must satisfy the [Istio Pods and Services Requirements](https://istio.io/docs/setup/kubernetes/prepare/requirements/). +- Traffic management features +- Enhanced monitoring and tracing +- Service discovery and routing +- Secure connections and service-to-service authentication with mutual TLS +- Load balancing +- Automatic retries, backoff, and circuit breaking -## Istio Sidecar Auto Injection +Istio needs to be set up by a Rancher administrator or cluster administrator before it can be used in a project for [comprehensive data visualizations,]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/#accessing-visualizations) traffic management, or any of its other features. -If an Istio sidecar is not injected into a pod, Istio will not work for that pod. If you enable Istio sidecar auto injection for a namespace, all pods created in the namespace will have an injected Istio sidecar. 
- -In the create and edit namespace page, you can enable or disable [Istio sidecar auto injection](https://istio.io/blog/2019/data-plane-setup/#automatic-injection). When you enable it, Rancher will add `istio-injection=enabled` label to the namespace automatically. - -Injection occurs at pod creation time. If the pod has been created before you enable auto injection, you need to kill the running pod and verify that a new pod is created with the injected sidecar. - -For information on how to inject the Istio sidecar manually, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). - -## View Traffic Graph - -Rancher integrates a Kiali graph into the Rancher UI. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. - -To see the traffic graph for a particular namespace: - -1. From the **Global** view, navigate to the project that you want to view traffic graph for. - -1. Select **Istio** in the navigation bar. - -1. Select **Traffic Graph** in the navigation bar. - -1. Select the namespace. Note: It only shows the namespaces which have the `istio-injection=enabled` label. - -## View Traffic Metrics - -Istio’s monitoring features provide visibility into the performance of all your services. To see the Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count and Request Duration metrics: - -1. From the **Global** view, navigate to the project that you want to view traffic metrics for. - -1. Select **Istio** in the navigation bar. - -1. Select **Traffic Metrics** in the navigation bar. - - -## Other Istio Features - -There are many other [Istio Features](https://istio.io/docs/concepts/what-is-istio/#core-features) -that you can now use in your cluster. +For information on how Istio is integrated with Rancher and how to set it up, refer to the [section about Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio) diff --git a/content/rancher/v2.x/en/project-admin/namespaces/_index.md b/content/rancher/v2.x/en/project-admin/namespaces/_index.md index 96c03538fae..ef84e757bbe 100644 --- a/content/rancher/v2.x/en/project-admin/namespaces/_index.md +++ b/content/rancher/v2.x/en/project-admin/namespaces/_index.md @@ -60,21 +60,6 @@ Cluster admins and members may occasionally need to move a namespace to another ### Editing Namespace Resource Quotas -If there is a [resource quota]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. +You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. -1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the namespace for which you want to edit the resource quota. Select **Ellipsis (...) > Edit**. - -1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - - For more information about each **Resource Type**, see [Resource Quota Types]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). 
- - >**Note:** - > - >- If a resource quota is not configured for the project, these options will not be available. - >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. - -**Result:** The namespace's default resource quota is overwritten with your override. +For more information, see how to [edit namespace resource quotas]({{< baseurl >}}/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/#editing-namespace-resource-quotas). \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md b/content/rancher/v2.x/en/project-admin/pipelines/_index.md similarity index 91% rename from content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md rename to content/rancher/v2.x/en/project-admin/pipelines/_index.md index 8b7cc985f05..0c65147cd77 100644 --- a/content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md +++ b/content/rancher/v2.x/en/project-admin/pipelines/_index.md @@ -1,16 +1,27 @@ --- -title: Pipelines -weight: 2529 +title: Rancher's CI/CD Pipelines +description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users +weight: 4000 aliases: - /rancher/v2.x/en/concepts/ci-cd-pipelines/ - /rancher/v2.x/en/tasks/pipelines/ - - /rancher/v2.x/en/tools/pipelines/ - /rancher/v2.x/en/tools/pipelines/configurations/ --- ->**Notes:** -> ->- Pipelines are new and improved for Rancher v2.1! Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). +Using Rancher, you can integrate with a GitHub repository to set up a continuous integration (CI) pipeline. + +To set up a pipeline, you'll first need to authorize Rancher using your GitHub settings. Directions are provided in the Rancher UI. After authorizing Rancher in GitHub, provide Rancher with a client ID and secret to authenticate. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + + + + A _pipeline_ is a software delivery process that is broken into different stages and steps. Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. @@ -30,6 +41,12 @@ Typically, pipeline stages include: Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can [configure version control providers](#version-control-providers) and [manage global pipeline execution settings](#managing-global-pipeline-execution-settings). Project members can only configure [repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#configuring-repositories) and [pipelines]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). + +> **Notes:** +> +> - Pipelines were improved in Rancher v2.1.
Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. +> - Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). + ## Overview Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. @@ -254,7 +271,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin ### A. Configuring Persistent Data for Docker Registry -1. From the project that you're configuring a pipeline for, select the **Workloads** tab. +1. From the project that you're configuring a pipeline for, click **Resources > Workloads.** In versions prior to v2.3.0, select the **Workloads** tab. 1. Find the `docker-registry` workload and select **Ellipsis (...) > Edit**. @@ -301,7 +318,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin ### B. Configuring Persistent Data for Minio -1. From the **Workloads** tab, find the `minio` workload and select **Ellipsis (...) > Edit**. +1. From the project view, click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) Find the `minio` workload and select **Ellipsis (...) > Edit**. 1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: diff --git a/content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md similarity index 95% rename from content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md rename to content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md index 0afe3b45e5f..5febdc414c6 100644 --- a/content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md +++ b/content/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/_index.md @@ -2,7 +2,7 @@ title: v2.0.x Pipeline Documentation weight: 9000 aliases: - - /rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x + - /rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x --- >**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines). @@ -35,9 +35,7 @@ You can set up your pipeline to run a series of stages and steps to test your co 1. Go to the project you want this pipeline to run in. -2. Select workloads from the top level Nav bar - -3. Select pipelines from the secondary Nav bar +2. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 4. Click Add pipeline button. @@ -47,7 +45,7 @@ You can set up your pipeline to run a series of stages and steps to test your co - Only the branch {BRANCH NAME}: Only events triggered by changes to this branch will be built. - - Evertyhing but {BRANCH NAME}: Build any branch that triggered an event EXCEPT events from this branch. + - Everything but {BRANCH NAME}: Build any branch that triggered an event EXCEPT events from this branch. - All branches: Regardless of the branch that triggered the event always build.
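+As a complement to the UI steps above, pipeline stages and steps can also be expressed as YAML stored with the repository (in v2.1 and later this is commonly a `.rancher-pipeline.yml` file). The sketch below is a minimal, hypothetical example only; the build image, shell commands, and image tag are placeholders:
+
+```yaml
+# Minimal, hypothetical pipeline sketch; image names, commands, and tags are placeholders.
+stages:
+  - name: Build and test
+    steps:
+      - runScriptConfig:
+          image: golang:1.13
+          shellScript: go build ./... && go test ./...
+  - name: Publish image
+    steps:
+      - publishImageConfig:
+          dockerfilePath: ./Dockerfile
+          buildContext: .
+          tag: example/myapp:${CICD_EXECUTION_SEQUENCE}
+```
+
+Stages generally run in order, so a failed build or test step in the first stage prevents the publish stage from running.
+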
diff --git a/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md b/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md new file mode 100644 index 00000000000..e92356c11c6 --- /dev/null +++ b/content/rancher/v2.x/en/project-admin/pod-security-policies/_index.md @@ -0,0 +1,31 @@ +--- +title: Pod Security Policies +weight: 5600 +--- + +> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). + +You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. + +### Prerequisites + +- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{< baseurl >}}/rancher/v2.x/en/admin-settings/pod-security-policies/). +- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [Existing Cluster: Adding a Pod Security Policy]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/editing-clusters/#adding-changing-a-pod-security-policy). + +### Applying a Pod Security Policy + +1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. +1. From the main menu, select **Projects/Namespaces**. +1. Find the project that you want to add a PSP to. From that project, select **Vertical Ellipsis (...) > Edit**. +1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. + Assigning a PSP to a project will: + + - Override the cluster's default PSP. + - Apply the PSP to the project. + - Apply the PSP to any namespaces you add to the project later. + +1. Click **Save**. + +**Result:** The PSP is applied to the project and any namespaces added to the project. + +>**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/project-members/_index.md b/content/rancher/v2.x/en/project-admin/project-members/_index.md index 386d5b80025..00c97f2098a 100644 --- a/content/rancher/v2.x/en/project-admin/project-members/_index.md +++ b/content/rancher/v2.x/en/project-admin/project-members/_index.md @@ -8,14 +8,46 @@ aliases: If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. +You can add members to a project as it is created, or add them to an existing project. + >**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/cluster-members/) instead. -There are two contexts where you can add project members: +### Adding Members to a New Project -- [Adding Members when Creating New Projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) +You can add members to a project as you create it (recommended if possible). 
For details on creating a new project, refer to the [cluster administration section.]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/) - You can add members to a project as you create it (recommended if possible). +### Adding Members to an Existing Project -- [Adding Members to an Existing Project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/) +Following project creation, you can add users as project members so that they can access its resources. - You can always add members to a project later. +1. From the **Global** view, open the project that you want to add members to. + +2. From the main menu, select **Members**. Then click **Add Member**. + +3. Search for the user or group that you want to add to the project. + + If external authentication is configured: + + - Rancher returns users from your external authentication source as you type. + + - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. + + >**Note:** If you are logged in as a local user, external users do not display in your search results. + +1. Assign the user or group **Project** roles. + + [What are Project Roles?]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/) + + >**Notes:** + > + >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + > + >- For `Custom` roles, you can modify the list of individual roles available for assignment. + > + > - To add roles to the list, [Add a Custom Role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles). + > - To remove roles from the list, [Lock/Unlock Roles]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/locked-roles/). + +**Result:** The chosen users are added to the project. + +- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md index 150463f4a8c..03bcf25570a 100644 --- a/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/_index.md @@ -1,115 +1,46 @@ --- -title: Resource Quotas +title: Project Resource Quotas weight: 2515 -aliases: - - /rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/ --- _Available as of v2.1.0_ In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. -## Resource Quotas in Rancher +This page is a how-to guide for creating resource quotas in existing projects. 
-Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). +Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/projects-and-namespaces/#creating-projects) -In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. +> Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) -In the following diagram, a Kubernetes admin is trying to enforce a resource quota without Rancher. The admin wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The admin has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. +### Applying Resource Quotas to Existing Projects -Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{< baseurl >}}/img/rancher/kubernetes-resource-quota.svg) +_Available as of v2.0.1_ -Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the [project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects), and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can [override it](#overriding-the-default-limit-for-a-namespace). +Edit [resource quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: -The resource quota includes two limits, which you set while creating or editing a project: - +- You want to limit the resources that a project and its namespaces can use. -- **Project Limits:** +- You want to scale the resources available to a project up or down when a resource quota is already in effect. -1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. - This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. +1. From the main menu, select **Projects/Namespaces**. -- **Namespace Default Limits:** +1. Find the project that you want to add a resource quota to. From that project, select **Ellipsis (...)
> Edit**. - This value is the default resource limit available for each namespace. When the resource quota is set on the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you [override it](#namespace-default-limit-overrides). +1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. -In the following diagram, a Rancher admin wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the admin can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`). +1. Select a [Resource Type]({{< baseurl >}}/rancher/v2.x/en/project-admin/resource-quotas/#resource-quota-types). -Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{< baseurl >}}/img/rancher/rancher-resource-quota.svg) +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. -The following table explains the key differences between the two quota types. + | Field | Description | + | ----------------------- | -------------------------------------------------------------------------------------------------------- | + | Project Limit | The overall resource limit for the project. | + | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | -| Rancher Resource Quotas | Kubernetes Resource Quotas | -| ---------------------------------------------------------- | -------------------------------------------------------- | -| Applies to projects and namespace. | Applies to namespaces only. | -| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | -| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. +1. **Optional:** Add more quotas. +1. Click **Create**. -## Creating Resource Quotas - -You can create resource quotas in the following contexts: - -- [While creating projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#creating-projects) -- [While editing projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/#editing-resource-quotas) - -## Resource Quota Types - -When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. 
- -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit* | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the project/namespace.1 | -| CPU Reservation* | The minimum amount of CPU (in millicores) guaranteed to the project/namespace.1 | -| Memory Limit* | The maximum amount of memory (in bytes) allocated to the project/namespace.1 | -| Memory Reservation* | The minimum amount of memory (in bytes) guaranteed to the project/namespace.1 | -| Storage Reservation | The minimum amount of storage (in gigabytes) guaranteed to the project/namespace. | -| Services Load Balancers | The maximum number of load balancers services that can exist in the project/namespace. | -| Services Node Ports | The maximum number of node port services that can exist in the project/namespace. | -| Pods | The maximum number of pods that can exist in the project/namespace in a non-terminal state (i.e., pods with a state of `.status.phase in (Failed, Succeeded)` equal to true). | -| Services | The maximum number of services that can exist in the project/namespace. | -| ConfigMaps | The maximum number of ConfigMaps that can exist in the project/namespace. | -| Persistent Volume Claims | The maximum number of persistent volume claims that can exist in the project/namespace. | -| Replications Controllers | The maximum number of replication controllers that can exist in the project/namespace. | -| Secrets | The maximum number of secrets that can exist in the project/namespace. | - ->***** When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. As of v2.2.0, a [container default resource limit](#setting-container-default-resource-limit) can be set at the same time to avoid the need to explicitly set these limits for every workload. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. - -## Overriding the Default Limit for a Namespace - -Although the **Namespace Default Limit** propagates from the project to each namespace, in some cases, you may need to increase (or decrease) the performance for a specific namespace. In this situation, you can override the default limits by editing the namespace. - -In the diagram below, the Rancher admin has a resource quota in effect for their project. However, the admin wants to override the namespace limits for `Namespace 3` so that it performs better. Therefore, the admin [raises the namespace limits]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) for `Namespace 3` so that the namespace can access more resources. 
- -Namespace Default Limit Override -![Namespace Default Limit Override]({{< baseurl >}}/img/rancher/rancher-resource-quota-override.svg) - -How to: [Editing Namespace Resource Quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) - -### Editing Namespace Resource Quotas - -You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -For more information, see how to [edit namespace resource quotas]({{< baseurl >}}/rancher/v2.x/en/project-admin/namespaces/#editing-namespace-resource-quota/). - -## Setting Container Default Resource Limit - -_Available as of v2.2.0_ - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. - -To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. - -> **Note:** Prior to v2.2.0, you could not launch catalog applications that did not have any limits set. With v2.2.0, you will be able to set a default container resource limit on a project and launch any catalog applications. - -Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| -| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | -| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | -| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. +**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, Rancher won't let you save your changes. 
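+Because the namespace default limit is ultimately enforced by Kubernetes' native resource quota mechanism, the quota that ends up in each namespace behaves like a standard `ResourceQuota` object. The sketch below is only a hypothetical illustration of such a propagated quota; the object name, namespace, and values are placeholders, not what Rancher literally generates:
+
+```yaml
+# Hypothetical per-namespace quota derived from a project's namespace default limit.
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: project-default-quota   # placeholder name
+  namespace: namespace-1        # placeholder namespace
+spec:
+  hard:
+    limits.cpu: "500m"
+    limits.memory: "512Mi"
+    pods: "20"
+    services.loadbalancers: "2"
+```
+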
diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md new file mode 100644 index 00000000000..bd9d1517459 --- /dev/null +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/override-container-default/_index.md @@ -0,0 +1,43 @@ +--- +title: Setting Container Default Resource Limits +weight: 3 +--- + +_Available as of v2.2.0_ + +When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. + +To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. + +### Editing the Container Default Resource Limit + +_Available as of v2.2.0_ + +Edit [container default resource limit]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#setting-container-default-resource-limit) when: + +- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. +- You want to edit the default container resource limit. + +1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. +1. From the main menu, select **Projects/Namespaces**. +1. Find the project that you want to edit the container default resource limit. From that project, select **Ellipsis (...) > Edit**. +1. Expand **Container Default Resource Limit** and edit the values. + +### Resource Limit Propagation + +When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. + +> **Note:** Prior to v2.2.0, you could not launch catalog applications that did not have any limits set. With v2.2.0, you can set a default container resource limit on a project and launch any catalog applications. + +Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. + +### Container Resource Quota Types + +The following resource limits can be configured: + +| Resource Type | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| +| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | +| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. 
| +| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md new file mode 100644 index 00000000000..0501008f985 --- /dev/null +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/_index.md @@ -0,0 +1,34 @@ +--- +title: Overriding the Default Limit for a Namespace +weight: 2 +--- + +Although the **Namespace Default Limit** propagates from the project to each namespace, in some cases, you may need to increase (or decrease) the resources available to a specific namespace. In this situation, you can override the default limits by editing the namespace. + +In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it can use more resources. Therefore, the administrator [raises the namespace limits]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) for `Namespace 3` so that the namespace can access more resources. + +Namespace Default Limit Override +![Namespace Default Limit Override]({{< baseurl >}}/img/rancher/rancher-resource-quota-override.svg) + +How to: [Editing Namespace Resource Quotas]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#editing-namespace-resource-quotas) + +### Editing Namespace Resource Quotas + +If there is a [resource quota]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. + +1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. + +1. From the main menu, select **Projects/Namespaces**. + +1. Find the namespace for which you want to edit the resource quota. Select **Ellipsis (...) > Edit**. + +1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. + + For more information about each **Resource Type**, see [Resource Quota Types]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/#resource-quota-types). + + >**Note:** + > + >- If a resource quota is not configured for the project, these options will not be available. + >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. + +**Result:** The namespace's default resource quota is overwritten with your override. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/_index.md new file mode 100644 index 00000000000..e671a9afdb1 --- /dev/null +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/quota-type-reference/_index.md @@ -0,0 +1,24 @@ +--- +title: Resource Quota Type Reference +weight: 4 +--- + +When you create a resource quota, you are configuring the pool of resources available to the project. You can set limits for the following resource types.
+ +| Resource Type | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CPU Limit* | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the project/namespace. | +| CPU Reservation* | The minimum amount of CPU (in millicores) guaranteed to the project/namespace. | +| Memory Limit* | The maximum amount of memory (in bytes) allocated to the project/namespace. | +| Memory Reservation* | The minimum amount of memory (in bytes) guaranteed to the project/namespace. | +| Storage Reservation | The minimum amount of storage (in gigabytes) guaranteed to the project/namespace. | +| Services Load Balancers | The maximum number of load balancer services that can exist in the project/namespace. | +| Services Node Ports | The maximum number of node port services that can exist in the project/namespace. | +| Pods | The maximum number of pods that can exist in the project/namespace in a non-terminal state (i.e., pods with `.status.phase` not in `(Failed, Succeeded)`). | +| Services | The maximum number of services that can exist in the project/namespace. | +| ConfigMaps | The maximum number of ConfigMaps that can exist in the project/namespace. | +| Persistent Volume Claims | The maximum number of persistent volume claims that can exist in the project/namespace. | +| Replication Controllers | The maximum number of replication controllers that can exist in the project/namespace. | +| Secrets | The maximum number of secrets that can exist in the project/namespace. | + +> **\*** When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. As of v2.2.0, a [container default resource limit](#setting-container-default-resource-limit) can be set at the same time to avoid the need to explicitly set these limits for every workload. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md new file mode 100644 index 00000000000..73d7c180f80 --- /dev/null +++ b/content/rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/_index.md @@ -0,0 +1,39 @@ +--- +title: How Resource Quotas Work in Rancher Projects +weight: 1 +--- + +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to [projects]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects). + +In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. + +In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher.
The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in their cluster (`Namespace 1-4`). However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. + +Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace +![Native Kubernetes Resource Quota Implementation]({{< baseurl >}}/img/rancher/kubernetes-resource-quota.svg) + +Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the [project]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#projects), and then the quota propagates to each namespace, after which Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can [override it](#overriding-the-default-limit-for-a-namespace). + +The resource quota includes two limits, which you set while creating or editing a project: + + +- **Project Limits:** + + This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. + +- **Namespace Default Limits:** + + This value is the default resource limit available for each namespace. When the resource quota is set on the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you [override it](#namespace-default-limit-overrides). + +In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`). + +Rancher: Resource Quotas Propagating to Each Namespace +![Rancher Resource Quota Implementation]({{< baseurl >}}/img/rancher/rancher-resource-quota.svg) + +The following table explains the key differences between the two quota types. + +| Rancher Resource Quotas | Kubernetes Resource Quotas | +| ---------------------------------------------------------- | -------------------------------------------------------- | +| Applies to projects and namespaces. | Applies to namespaces only. | +| Creates a resource pool for all namespaces in a project. | Applies static resource limits to individual namespaces. | +| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace.
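To make the contrast concrete, here is a minimal sketch of the plain-Kubernetes workflow described above, in which the same quota has to be created separately in every namespace. The namespace names and limit values are hypothetical.

``` bash
# Without Rancher projects, an equivalent quota must be created once per namespace.
# Namespace names and limits below are illustrative only.
for ns in namespace-1 namespace-2 namespace-3 namespace-4; do
  kubectl create quota default-quota \
    --namespace "$ns" \
    --hard=limits.cpu=2,limits.memory=2Gi,pods=20
done
```

With a Rancher project quota, this per-namespace step is handled for you by propagation.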
\ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/tools/_index.md b/content/rancher/v2.x/en/project-admin/tools/_index.md index 94e0c446564..0921bdc4cf6 100644 --- a/content/rancher/v2.x/en/project-admin/tools/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/_index.md @@ -1,76 +1,41 @@ --- -title: Configuring Tools +title: Tools for Logging, Monitoring, and Visibility weight: 2525 --- Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into the following categories: - -- [Alerts](#alerts) +- [Notifiers and Alerts](#notifiers-and-alerts) - [Logging](#logging) -- [Pipelines](#pipelines) - [Monitoring](#monitoring) -## Alerts +## Notifiers and Alerts -To keep your clusters and applications healthy and driving your organizational productivity forward, you need stay informed of events occurring in your clusters, both planned and unplanned. To help you stay informed of these events, Rancher allows you to configure alerts. +Notifiers and alerts are two features that work together to inform you of events in the Rancher system. -_Alerts_ are sets of rules, chosen by you, to monitor for specific events. The scope for alerts can be set at either the cluster or project level. +[Notifiers]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. -Some examples of alert events are: - -- A Kubernetes [master component]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#kubernetes-cluster-node-components) entering an unhealthy state. -- A node or [workload]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/) error occurring. -- A scheduled deployment taking place as planned. -- A node's hardware resources becoming overstressed. - -When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. - -Additionally, you can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. - -You can configure alerts at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/alerts/). +[Alerts]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts) are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level. ## Logging -Rancher can integrate with popular external services used for event streams, telemetry, or search.
Rancher can integrate with the following services: +Logging is helpful because it allows you to: -- Elasticsearch -- splunk -- kafka -- syslog -- fluentd +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debug and troubleshoot problems -These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. +Rancher can integrate with Elasticsearch, Splunk, Kafka, syslog, and Fluentd. -When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. Additionally, you'll have the opportunity to enter key value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key value pairs. - -You can configure these services to collect logs at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/logging/) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/logging/). - -## Pipelines - -Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -To set up a pipeline, you'll first need to authorize Rancher using your GitHub settings. Directions are provided in the Rancher UI. After authorizing Rancher in GitHub, provide Rancher with a client ID and secret to authenticate. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. - -Validate your builds. - -Deploy your build images to your cluster. - -Run unit tests. - -Run regression tests. - -For more information, see [Pipelines]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines/). +For details, refer to the [logging section.]({{}}/rancher/v2.x/en/cluster-admin/tools/logging) ## Monitoring _Available as of v2.2.0_ -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. Prometheus provides a _time series_ of your data, which is a stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster.
- -You can configure these services to collect logs at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) or [project level]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/monitoring/). +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring) diff --git a/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md b/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md index d68b22c393b..fa9c7b0bdaa 100644 --- a/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md @@ -3,15 +3,24 @@ title: Alerts weight: 2526 --- -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. To help you stay informed of these events, you can configure alerts. +To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. -Alerts are sets of rules, chosen by you, to monitor for specific events. +Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. -Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. +Before you can receive alerts, at least one [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) must be configured at the cluster level. + +Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. + +This section covers the following topics: + +- [Alerts scope](#alerts-scope) +- [Default project-level alerts](#default-project-level-alerts) +- [Adding project alerts](#adding-project-alerts) +- [Managing project alerts](#managing-project-alerts) ## Alerts Scope - The scope for alerts can be set at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level. +The scope for alerts can be set at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level.
At the project level, Rancher monitors specific deployments and sends alerts for @@ -20,9 +29,20 @@ At the project level, Rancher monitors specific deployments and sends alerts for * Pod status * The Prometheus expression crosses the thresholds +## Default Project-level Alerts + +When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier]({{}}/rancher/v2.x/en/cluster-admin/tools/notifiers) for them is configured at the cluster level. + +| Alert | Explanation | +|-------|-------------| +| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | +| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | + +For information on other default alerts, refer to the section on [cluster-level alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) + ## Adding Project Alerts ->**Prerequisite:** Before you can receive project alerts, you must [add a notifier]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers). +>**Prerequisite:** Before you can receive project alerts, you must add a notifier. 1. From the **Global** view, navigate to the project that you want to configure project alerts for. Select **Tools > Alerts**. In versions prior to v2.2.0, you can choose **Resources > Alerts**. @@ -31,6 +51,7 @@ At the project level, Rancher monitors specific deployments and sends alerts for 1. Enter a **Name** for the alert that describes its purpose. You can group alert rules by purpose. 1. Based on the type of alert you want to create, complete one of the instruction subsets below. + {{% accordion id="pod" label="Pod Alerts" %}} This alert type monitors for the status of a specific pod. @@ -146,14 +167,14 @@ If you enable [project monitoring]({{< baseurl >}}/rancher/v2.x/en/project-admin 1. Continue adding more **Alert Rules** to the group. -1. Finally, choose the [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/notifiers/) that send you alerts. +1. Finally, choose the [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/) that send you alerts. - You can set up multiple notifiers. - You can change notifier recipients on the fly. **Result:** Your alert is configured. A notification is sent when the alert is triggered. -#### Managing Project Alerts +## Managing Project Alerts To manage project alerts, browse to the project whose alerts you want to manage. Then select **Tools > Alerts**. In versions prior to v2.2.0, you can choose **Resources > Alerts**. You can: diff --git a/content/rancher/v2.x/en/project-admin/tools/logging/_index.md b/content/rancher/v2.x/en/project-admin/tools/logging/_index.md index 680f9aa2e86..5e842ce96c7 100644 --- a/content/rancher/v2.x/en/project-admin/tools/logging/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/logging/_index.md @@ -5,6 +5,8 @@ weight: 2527 Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters.
+For background information about how logging integrations work, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/cluster-admin/tools/logging/#how-logging-integrations-work) + Rancher supports the following services: - Elasticsearch diff --git a/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md b/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md index c82f4fcbe08..7174c065867 100644 --- a/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/monitoring/_index.md @@ -5,15 +5,19 @@ weight: 2528 _Available as of v2.2.4_ -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. ->A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. +> For more information about how Prometheus works, refer to the [cluster administration section.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#about-prometheus) -In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. Multi-tenancy support in terms of cluster and project-only Prometheus instances are also supported. +This section covers the following topics: -Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. +- [Monitoring scope](#monitoring-scope) +- [Permissions to configure project monitoring](#permissions-to-configure-project-monitoring) +- [Enabling project monitoring](#enabling-project-monitoring) +- [Project-level monitoring resource requirements](#project-level-monitoring-resource-requirements) +- [Project metrics](#project-metrics) -## Monitoring Scope +### Monitoring Scope Using Prometheus, you can monitor Rancher at both the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. 
@@ -25,9 +29,15 @@ Using Prometheus, you can monitor Rancher at both the [cluster level]({{< baseur - Project monitoring allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. -## Configuring Project Monitoring +### Permissions to Configure Project Monitoring -1. From the **Global** view, navigate to the project that you want to configure project monitoring. +Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. + +### Enabling Project Monitoring + +> **Prerequisite:** Cluster monitoring must be [enabled.]({{}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) + +1. Go to the project where monitoring should be enabled. Note: When cluster monitoring is enabled, monitoring is also enabled by default in the **System** project. 1. Select **Tools > Monitoring** in the navigation bar. @@ -35,7 +45,7 @@ Using Prometheus, you can monitor Rancher at both the [cluster level]({{< baseur 1. Click **Save**. -### Project Level Monitoring Resource Requirements +### Project-Level Monitoring Resource Requirements Container| CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ---------|---------------|---------------|-------------|-------------|------------- @@ -45,14 +55,27 @@ Grafana | 100m | 100Mi | 200m | 200Mi | No **Result:** A single application,`project-monitoring`, is added as an [application]({{< baseurl >}}/rancher/v2.x/en/catalog/apps/) to the project. After the application is `active`, you can start viewing [project metrics](#project-metrics) through the [Rancher dashboard]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#rancher-dashboard) or directly from [Grafana]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/#grafana). -## Project Metrics +### Project Metrics -If [cluster monitoring]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) is also enabled for the project, [workload metrics]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#workload-metrics) are available for the project. +[Workload metrics]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/monitoring/) and at the [project level.](#enabling-project-monitoring) -You can monitor custom metrics from any [exporters](https://prometheus.io/docs/instrumenting/exporters/) as long as project monitoring is enabled. You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. +You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. -### Example +> **Example:** +> A [Redis](https://redis.io/) application is deployed in the namespace `redis-app` in the project `Datacenter`. It is monitored via [Redis exporter](https://github.com/oliver006/redis_exporter). 
After enabling project monitoring, you can edit the application to configure the Advanced Options -> Custom Metrics section. Enter the `Container Port` and `Path` and select the `Protocol`. -A [Redis](https://redis.io/) application is deployed in the namespace `redis-app` in the project `Datacenter`. It is monitored via [Redis exporter](https://github.com/oliver006/redis_exporter). +To access a project-level Grafana instance: -After enabling project monitoring, you can edit the application to configure the **Advanced Options -> Custom Metrics** section. Enter the `Container Port` and `Path` and select the `Protocol`. +1. From the **Global** view, navigate to a cluster that has monitoring enabled. + +1. Go to a project that has monitoring enabled. + +1. From the project view, click **Apps.** In versions prior to v2.2.0, choose **Catalog Apps** on the main navigation bar. + +1. Go to the `project-monitoring` application. + +1. In the `project-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. + +1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the default `admin` credentials, and then change the password. + +**Result:** You are logged into Grafana. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.x/en/quick-start-guide/_index.md b/content/rancher/v2.x/en/quick-start-guide/_index.md index 5cdb50a4b1a..630450f42d2 100644 --- a/content/rancher/v2.x/en/quick-start-guide/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/_index.md @@ -1,6 +1,7 @@ --- -title: Quick Start Guides -short title: Quick Start Index +title: Rancher Deployment Quick Start Guides +metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. +short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. weight: 25 --- >**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{< baseurl >}}/rancher/v2.x/en/installation/). @@ -11,4 +12,6 @@ We have Quick Start Guides for: - [Deploying Rancher Server]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. -- [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide//workload/): Deploy a simple workload and expose it, letting you access it from outside the cluster. +- [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload/): Deploy a simple workload and expose it, letting you access it from outside the cluster.
+ +- [Using the CLI]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/cli/): Use `kubectl` or the Rancher command line interface (CLI) to interact with your Rancher instance. diff --git a/content/rancher/v2.x/en/quick-start-guide/cli/_index.md b/content/rancher/v2.x/en/quick-start-guide/cli/_index.md new file mode 100644 index 00000000000..5924bcc2d25 --- /dev/null +++ b/content/rancher/v2.x/en/quick-start-guide/cli/_index.md @@ -0,0 +1,22 @@ +--- +title: CLI with Rancher +weight: 100 +--- + +Interact with Rancher using command line interface (CLI) tools from your workstation. + +## Rancher CLI + +Follow the steps in the [Rancher CLI documentation](../cli). + +Ensure you can run `rancher kubectl get pods` successfully. + + +## kubectl +Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + + +Configure kubectl by visiting your cluster in the Rancher web UI, clicking `Kubeconfig`, copying the contents, and pasting them into your `~/.kube/config` file. + +Verify that you can run `kubectl cluster-info` or `kubectl get pods` successfully. A minimal example is sketched below. + diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md index 7519a654cbf..65fee61875a 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/amazon-aws-qs/_index.md @@ -1,5 +1,6 @@ --- -title: Amazon AWS Quick Start +title: Rancher AWS Quick Start Guide +description: Read this step by step Rancher AWS guide to quickly deploy a Rancher Server with a single node cluster attached. weight: 100 --- The following steps will quickly deploy a Rancher Server with a single node cluster attached. diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md index 2d4a3a0b82a..fc89564232a 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md @@ -4,7 +4,7 @@ weight: 300 --- Howdy Partner! This tutorial walks you through: -- Installation of {{< product >}} {{< version >}} +- Installation of {{< product >}} 2.x - Creation of your first cluster - Deployment of an application, Nginx @@ -36,7 +36,7 @@ This Quick Start Guide is divided into different tasks for easier consumption. >**Note:** > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. > - > For a full list of port requirements, refer to [Single Node Installation]({{< baseurl >}}/rancher/v2.x/en/installation/references). + > For a full list of port requirements, refer to [Docker Installation]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/). Provision the host according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/).
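Returning to the CLI quick start above, the following is a minimal sketch of the kubectl setup it describes. It assumes you have already copied the kubeconfig contents from the Rancher UI; the file path shown is the standard kubectl default.

``` bash
# Minimal sketch: place the kubeconfig copied from the Rancher UI where kubectl expects it.
mkdir -p ~/.kube
# Paste the copied kubeconfig contents into this file with your editor of choice:
vi ~/.kube/config

# Verify that kubectl can reach the cluster:
kubectl cluster-info
kubectl get pods --all-namespaces
```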
diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md index 6aaaf287912..ebf52672472 100644 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md @@ -19,7 +19,7 @@ For this workload, you'll be deploying the application Rancher Hello-World. 3. Open the **Project: Default** project. -4. From the main menu select **Workloads**, then click on the **Workloads** tab. +4. Click **Resources > Workloads.** In versions prior to v2.3.0, click **Workloads > Workloads.** 5. Click **Deploy**. @@ -49,7 +49,7 @@ Now that the application is up and running it needs to be exposed so that other 3. Open the **Default** project. -4. From the main menu select **Workloads**, then click on the **Load Balancing** tab. +4. Click **Resources > Workloads > Load Balancing.** In versions prior to v2.3.0, click the **Workloads** tab. Click on the **Load Balancing** tab. 5. Click **Add Ingress**. diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md index 2de08faf907..ace03022684 100644 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md @@ -19,7 +19,7 @@ For this workload, you'll be deploying the application Rancher Hello-World. 3. Open the **Project: Default** project. -4. From the main menu select **Workloads**, then click on the **Workloads** tab. +4. Click **Resources > Workloads.** In versions prior to v2.3.0, click **Workloads > Workloads.** 5. Click **Deploy**. diff --git a/content/rancher/v2.x/en/removing-rancher/_index.md b/content/rancher/v2.x/en/removing-rancher/_index.md deleted file mode 100644 index e7f11cc45a4..00000000000 --- a/content/rancher/v2.x/en/removing-rancher/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Removing Rancher Server -weight: 7501 -aliases: - - /rancher/v2.x/en/installation/removing-rancher/cleaning-cluster-nodes/ - - /rancher/v2.x/en/installation/removing-rancher/ - - /rancher/v2.x/en/admin-settings/removing-rancher/ - - /rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/ ---- - -When you deploy Rancher and use it to provision clusters, Rancher installs its components on the nodes you use. There are two contexts in which you'd remove Rancher from a Kubernetes cluster node. - -- **[Removing Rancher from Your Rancher Server Nodes]({{< baseurl >}}/rancher/v2.x/en/system-tools/#remove)**: In this context, you are removing Rancher from the Kubernetes cluster that you configured for your [Rancher installation]({{< baseurl >}}/rancher/v2.x/en/installation/ha/). This can be done using [System Tools]({{< baseurl >}}/rancher/v2.x/en/system-tools/). -- **[Removing Rancher Components from Rancher Launched Kubernetes Clusters]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/user-cluster-nodes/)**: In this context, you are removing Rancher components from Kubernetes clusters that you [launched using Rancher]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). 
diff --git a/content/rancher/v2.x/en/security/_index.md b/content/rancher/v2.x/en/security/_index.md index 95c6727e47c..7f1c9ff3da6 100644 --- a/content/rancher/v2.x/en/security/_index.md +++ b/content/rancher/v2.x/en/security/_index.md @@ -20,21 +20,69 @@ weight: 7505 +Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise-grade [RBAC capability,]({{}}/rancher/v2.x/en/admin-settings/rbac) Rancher makes your Kubernetes clusters even more secure. + +On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: + +- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) +- [Guide to hardening Rancher installations](#rancher-hardening-guide) +- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) +- [Third-party penetration test reports](#third-party-penetration-test-reports) +- [Rancher CVEs and resolutions](#rancher-cves-and-resolutions) +- [Security Tips and Best Practices](#security-tips-and-best-practices) + +### Running a CIS Security Scan on a Kubernetes Cluster + +_Available as of v2.4.0-alpha1_ + +Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS (Center for Internet Security) Kubernetes Benchmark. + +The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. The Benchmark provides recommendations of two types: Scored and Not Scored. We run tests related only to Scored recommendations. + +When Rancher runs a CIS Security Scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. + +For details, refer to the section on [security scans.]({{}}/rancher/v2.x/en/security/security-scan) + ### Rancher Hardening Guide -The Rancher Hardening Guide is based off of controls and best practices found in the [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes/) from the Center for Internet Security. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. and Rancher v2.2.x. See Rancher's [Self Assessment of the CIS Kubernetes Benchmark](#cis-benchmark-rancher-self-assessment) for the full list of security controls. +The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. -- [Hardening Guide for Rancher v2.1.x with Kubernetes 1.11]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.1/) -- [Hardening Guide for Rancher v2.2.x with Kubernetes 1.13]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.2/) +The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x, v2.2.x, and v2.3.x. See Rancher's [Self Assessment of the CIS Kubernetes Benchmark](#cis-benchmark-rancher-self-assessment) for the full list of security controls. -### CIS Benchmark Rancher Self-Assessment +> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes.
+ +Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +[Hardening Guide v2.3.3]({{}}/rancher/v2.x/en/security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 +[Hardening Guide v2.3]({{}}/rancher/v2.x/en/security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 +[Hardening Guide v2.2]({{}}/rancher/v2.x/en/security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 +[Hardening Guide v2.1]({{}}/rancher/v2.x/en/security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 + +### The CIS Benchmark and Self-Assessment The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). -* [CIS Kubernetes Benchmark 1.3.0 - Rancher 2.1.x with Kubernetes 1.11]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.1/) -* [CIS Kubernetes Benchmark 1.4.0 - Rancher 2.2.x with Kubernetes 1.13]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.2/) +Each version of Rancher's self assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +[Self Assessment Guide v2.3.3]({{}}/rancher/v2.x/en/security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-+-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 +[Self Assessment Guide v2.3]({{}}/rancher/v2.x/en/security/benchmark-2.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-0-2-3-2-with-kubernetes-1-15) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 +[Self Assessment Guide v2.2]({{}}/rancher/v2.x/en/security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 +[Self Assessment Guide v2.1]({{}}/rancher/v2.x/en/security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 + +### Third-party Penetration Test Reports + +Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above. 
+ +Results: + +- [Cure53 Pen Test - 7/2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) +- [Untamed Theory Pen Test- 3/2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) ### Rancher CVEs and Resolutions @@ -48,5 +96,9 @@ Rancher is committed to informing the community of security issues in our produc | [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | | [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | | [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | -| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them admin access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. 
| 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | | [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | + +### Security Tips and Best Practices + +Our [best practices guide]({{}}/rancher/v2.x/en/best-practices/management/#tips-for-security) includes basic tips for increasing security in Rancher. \ No newline at end of file diff --git a/content/rancher/v2.x/en/security/benchmark-2.1/_index.md b/content/rancher/v2.x/en/security/benchmark-2.1/_index.md index e2687edd5dd..faf84c76dfe 100644 --- a/content/rancher/v2.x/en/security/benchmark-2.1/_index.md +++ b/content/rancher/v2.x/en/security/benchmark-2.1/_index.md @@ -1,17 +1,15 @@ --- -title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x -weight: 104 +title: CIS Benchmark Rancher Self-Assessment Guide v2.1 +weight: 105 --- -### CIS Kubernetes Benchmark 1.3.0 - Rancher 2.1.x with Kubernetes 1.11 +This document is a companion to the Rancher v2.1 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Benchmark_Assessment.pdf) +This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: -#### Overview - -The following document scores a Kubernetes 1.11.x RKE cluster provisioned according to the Rancher v2.1.x hardening guide against the CIS 1.3.0 Kubernetes benchmark. - -This document is a companion to the Rancher v2.1.x security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.1 | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes 1.11 | Benchmark 1.3.0 Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. 
@@ -19,6 +17,8 @@ This document is to be used by Rancher operators, security teams, auditors and d For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.3.0. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Benchmark_Assessment.pdf) + #### Testing controls methodology Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. @@ -1190,7 +1190,7 @@ docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).* **Notes** -RKE does not currently implement a seperate CA for etcd certificates. +RKE does not currently implement a separate CA for etcd certificates. `--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. diff --git a/content/rancher/v2.x/en/security/benchmark-2.2/_index.md b/content/rancher/v2.x/en/security/benchmark-2.2/_index.md index b5e8902d450..0defa3142ef 100644 --- a/content/rancher/v2.x/en/security/benchmark-2.2/_index.md +++ b/content/rancher/v2.x/en/security/benchmark-2.2/_index.md @@ -1,18 +1,23 @@ --- -title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x -weight: 103 +title: CIS Benchmark Rancher Self-Assessment Guide v2.2 +weight: 104 --- +This document is a companion to the Rancher v2.2 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.2 | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes 1.13 | Benchmark v1.4.0 and v1.4.1 + ### CIS Kubernetes Benchmark 1.4.0 - Rancher 2.2.x with Kubernetes 1.13 +There is no material difference in control verification checks between CIS Kubernetes Benchmark 1.4.0 and [1.4.1](https://rancher.com/docs/rancher/v2.x/en/security/benchmark-2.2/#cis-kubernetes-benchmark-1-4-1-rancher-2-2-x-with-kubernetes-1-13) + +### CIS Kubernetes Benchmark 1.4.1 - Rancher 2.2.x with Kubernetes 1.13 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Benchmark_Assessment.pdf) -#### Overview - -The following document scores a Kubernetes 1.13.x RKE cluster provisioned according to the Rancher v2.2.x hardening guide against the CIS 1.4.0 Kubernetes benchmark. - -This document is a companion to the Rancher v2.2.x security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. 
This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. This document is to be used by Rancher operators, security teams, auditors and decision makers. @@ -34,7 +39,6 @@ The following scored controls do not currently pass, and Rancher Labs is working - 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more-restrictive (Scored) - 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) ### Controls @@ -562,7 +566,7 @@ In Kubernetes 1.13.x this flag is `--encryption-provider-config` docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--encryption-provider-config=.*").string' ``` -**Returned Value:** `encryption-provider-config=/etc/kubernetes/encryption.yaml` +**Returned Value:** `encryption-provider-config=/opt/kubernetes/encryption.yaml` **Result:** Pass @@ -575,7 +579,7 @@ Only the first provider in the list is active. **Audit** ``` bash -grep -A 1 providers: /etc/kubernetes/encryption.yaml | grep aescbc +grep -A 1 providers: /opt/kubernetes/encryption.yaml | grep aescbc ``` **Returned Value:** `- aescbc:` @@ -588,8 +592,8 @@ grep -A 1 providers: /etc/kubernetes/encryption.yaml | grep aescbc The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: -- `/etc/kubernetes/admission.yaml` -- `/etc/kubernetes/event.yaml` +- `/opt/kubernetes/admission.yaml` +- `/opt/kubernetes/event.yaml` See Host Configuration for details. @@ -607,7 +611,7 @@ docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-p docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' ``` -**Returned Value:** `--admission-control-config-file=/etc/kubernetes/admission.yaml` +**Returned Value:** `--admission-control-config-file=/opt/kubernetes/admission.yaml` **Result:** Pass @@ -631,7 +635,7 @@ docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(A docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' ``` -**Returned Value:** `--audit-policy-file=/etc/kubernetes/audit.yaml` +**Returned Value:** `--audit-policy-file=/opt/kubernetes/audit.yaml` **Result:** Pass @@ -1431,19 +1435,15 @@ docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").st #### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -**Notes** - -RKE currently runs the kubelet without the `--authorization-mode` flag. 
- **Audit** ``` bash docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' ``` -**Returned Value:** `null` +**Returned Value:** `--authorization-mode=Webhook` -**Result:** Fail +**Result:** Pass #### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) diff --git a/content/rancher/v2.x/en/security/benchmark-2.3.3/_index.md b/content/rancher/v2.x/en/security/benchmark-2.3.3/_index.md new file mode 100644 index 00000000000..74d07855260 --- /dev/null +++ b/content/rancher/v2.x/en/security/benchmark-2.3.3/_index.md @@ -0,0 +1,1785 @@ +--- +title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.3 +weight: 103 +--- + +This document is a companion to the Rancher v2.3.3 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.3.3 | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Benchmark_Assessment.pdf) + +> The CIS Benchmark version v1.4.1 covers the security posture of Kubernetes 1.13 clusters. This self-assessment has been run against Kubernetes 1.16, using the guidelines outlined in the CIS v1.4.1 benchmark. Updates to the CIS benchmarks will be applied to this document as they are released. + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.4.1. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Scoring the commands is different in Rancher Labs than in the CIS Benchmark. Where the commands differ from the original CIS benchmark, the commands specific to Rancher Labs are provided for testing. + +When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the `jq` command to provide human-readable formatting. + +#### Known Scored Control Failures + +The following scored controls do not currently pass, and Rancher Labs is working towards addressing these through future enhancements to the product. 
+ +- 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) +- 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) + +### Controls + +--- + +## 1 - Master Node Security Configuration + +### 1.1 - API Server + +#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' +``` + +**Returned Value:** `--anonymous-auth=false` + +**Result:** Pass + +#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) + +**Notes** + +Flag not set or `--insecure-bind-address=127.0.0.1`. RKE sets this flag to `--insecure-bind-address=127.0.0.1` + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.6 - Ensure that the `--insecure-port argument` is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' +``` + +**Returned Value:** `--insecure-port=0` + +**Result:** Pass + +#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' +``` + +**Returned Value:** `--secure-port=6443` + +**Result:** Pass + +#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) + +**Note:** This deprecated flag was removed in 1.14, so it cannot be set. 
+ +**Result:** Pass + +#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' +``` + +**Returned Value:** `AlwaysPullImages` + +**Result:** Pass + +#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' +``` + +**Returned Value:** `DenyEscalatingExec` + +**Result:** Pass + +#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Not Scored) + +**Notes** + +This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the CIS Benchmark document: + +> This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies + +Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Document + +#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' +``` + +**Returned Value:** `NamespaceLifecycle` + +**Result:** Pass + +#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) + +**Notes** + +This path is the path inside of the container. It's combined with the RKE `cluster.yml` `extra-binds:` option to map the audit log to the host filesystem. + +Audit logs should be collected and shipped off-system to guarantee their integrity. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' +``` + +**Returned Value:** `--audit-log-log=/var/log/kube-audit/audit-log.json` + +**Result:** Pass + +#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxage=30` + +**Result:** Pass + +#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. 
+ +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxbackup=10` + +**Result:** Pass + +#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxsize=100` + +**Result:** Pass + +#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Audit** + +``` +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' +``` + +**Returned Value:** `--authorization-mode=Node,RBAC` + +**Result:** Pass + +#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' +``` + +**Returned Value:** `--kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) + +**Audit** (`--kubelet-client-certificate`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' +``` + +**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` + +**Audit** (`--kubelet-client-key`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' +``` + +**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` + +**Result:** Pass + +#### 1.1.23 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' +``` + +**Returned Value:** `--service-account-lookup=true` + +**Result:** Pass + +#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' +``` + +**Returned Value:** `PodSecurityPolicy` + +**Result:** Pass + +#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' +``` + +**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` + +**Result:** Pass + +#### 1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) + +**Audit** (`--etcd-certfile`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' +``` + +**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` + +**Audit** (`--etcd-keyfile`) + +``` bash +docker inspect kube-apiserver | 
jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' +``` + +**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` + +**Result:** Pass + +#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' +``` + +**Returned Value:** `ServiceAccount` + +**Result:** Pass + +#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Audit** (`--tls-cert-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' +``` + +**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` + +**Audit** (`--tls-key-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' +``` + +**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` + +**Result:** Pass + +#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' +``` + +**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) + +**Audit** (Allowed Ciphers) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** (Disallowed Ciphers) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' +``` + +**Returned Value:** `null` + +``` bash +docker inspect kube-apiserver | jq -e 
'.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' +``` + +**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.1.32 - Ensure that the `--authorization-mode` argument includes Node (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' +``` + +**Returned Value:** `--authorization-mode=Node,RBAC` + +**Result:** Pass + +#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' +``` + +**Returned Value:** `NodeRestriction` + +**Result:** Pass + +#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) + +**Notes** +In Kubernetes 1.16.x this flag is `--encryption-provider-config` + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--encryption-provider-config=.*").string' +``` + +**Returned Value:** `encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml` + +**Result:** Pass + +#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) + +**Notes** + +Only the first provider in the list is active. + +**Audit** + +``` bash +grep -A 1 providers: /etc/kubernetes/ssl/encryption.yaml | grep aescbc +``` + +**Returned Value:** `- aescbc:` + +**Result:** Pass + +#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Notes** + +The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: + +- `/etc/kubernetes/admission.yaml` + +See Host Configuration for details. + +**Audit** (Admissions plugin) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' +``` + +**Returned Value:** `EventRateLimit` + +**Audit** (`--admission-control-config-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' +``` + +**Returned Value:** `--admission-control-config-file=/etc/kubernetes/admission.yaml` + +**Result:** Pass + +#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) + +**Notes** + +`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. + +**Audit** (Feature Gate) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' +``` + +**Returned Value:** `null` + +**Audit** (Audit Policy File) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' +``` + +**Returned Value:** `--audit-policy-file=/etc/kubernetes/audit-policy.yaml` + +**Result:** Pass + +#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) + +**Notes** + +RKE uses the default value of 60s and doesn't set this option. Tuning this value is specific to the environment. 
+ +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### Ensure that the --authorization-mode argument includes RBAC (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=.*").string' +``` + +**Returned Value:** `"--authorization-mode=Node,RBAC"` + +**Result:** Pass + +### 1.2 - Scheduler + +#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) + +**Audit** + +``` bash +docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +``` bash +docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' +``` + +**Returned Value:** `--address=127.0.0.1` + +**Result:** Pass + +### 1.3 - Controller Manager + +#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' +``` + +**Returned Value:** `--terminated-pod-gc-threshold=1000` + +**Result:** Pass + +#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' +``` + +**Returned Value:** `--use-service-account-credentials=true` + +**Result:** Pass + +#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' +``` + +**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` + +**Result:** Pass + +#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' +``` + +**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) + +**Notes** + +RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. 
+ +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' +``` + +**Returned Value:** `RotateKubeletServerCertificate=true` + +**Result:** Pass + +#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' +``` + +**Returned Value:** `--address=127.0.0.1` + +**Result:** Pass + +### 1.4 - Configuration Files + +#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) + +**Notes** + +This is a manual check. + +**Audit** (`/var/lib/cni/networks/k8s-pod-network`) + +**Note** +This may return a lockfile. Permissions on this file do not need to be as restrictive as the CNI files. 
+ +``` bash +stat -c "%n - %a" /var/lib/cni/networks/k8s-pod-network/* +``` + +**Returned Value:** + +``` bash +/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - 644 +/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - 644 +/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - 644 +/var/lib/cni/networks/k8s-pod-network/lock - 750 +``` + +**Audit** (`/etc/cni/net.d`) + +``` bash +stat -c "%n - %a" /etc/cni/net.d/* +``` + +**Returned Value:** + +``` bash +/etc/cni/net.d/10-canal.conflist - 644 +/etc/cni/net.d/calico-kubeconfig - 600 +``` + +**Result:** Pass + +#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored) + +**Notes** + +This is a manual check. + +**Audit** (`/var/lib/cni/networks/k8s-pod-network`) + +``` bash +stat -c "%n - %U:%G" /var/lib/cni/networks/k8s-pod-network/* +``` + +**Returned Value:** + +``` bash +/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - root:root +/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - root:root +/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - root:root +/var/lib/cni/networks/k8s-pod-network/lock - root:root +``` + +**Audit** (`/etc/cni/net.d`) + +``` bash +stat -c "%n - %U:%G" /etc/cni/net.d/* +``` + +**Returned Value:** + +``` bash +/etc/cni/net.d/10-canal.conflist - root:root +/etc/cni/net.d/calico-kubeconfig - root:root +``` + +**Result:** Pass + +#### 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) + +**Notes** + +Files underneath the data dir have permissions set to `700` + +``` bash +stat -c "%n - %a" /var/lib/etcd/* + +/var/lib/etcd/member - 700 +``` + +**Audit** + +``` bash +stat -c %a /var/lib/etcd +``` + +**Returned Value:** `700` + +**Result:** Pass + +#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored) + +**Notes** + +The `etcd` container runs as the `etcd` user. The data directory and files are owned by `etcd`. + +**Audit** + +``` bash +stat -c %U:%G /var/lib/etcd +``` + +**Returned Value:** `etcd:etcd` + +**Result:** Pass + +#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored) + +**Notes** + +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It's presented to user where RKE is run. We recommend that this kube_config_cluster.yml file be kept in secure store. + +**Result:** Pass (Not Applicable) + +#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored) + +**Notes** + +RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in secure store. 
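+As a minimal illustration (an assumption about your workflow, not an RKE requirement), access to the generated kubeconfig can be restricted on the workstation where `rke` was run:
+
+``` bash
+# Limit read/write access on the generated kubeconfig to its owner only.
+chmod 600 kube_config_cluster.yml
+```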
+ +**Result:** Pass (Not Applicable) + +#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +**Returned Value:** `640` + +**Result:** Pass + +#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml +``` + +**Returned Value:** `640` + +**Result:** Pass + +#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 1.4.19 - Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored) + +**Audit** + +``` bash +ls -laR /etc/kubernetes/ssl/ |grep -v yaml +``` + +**Returned Value:** +``` bash +total 128 +drwxr-xr-x 2 root root 4096 Jul 1 19:53 . +drwxr-xr-x 4 root root 4096 Jul 1 19:53 .. +-rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-key.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-proxy-client-key.pem +-rw-r--r-- 1 root root 1107 Jul 1 19:53 kube-apiserver-proxy-client.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-apiserver-requestheader-ca-key.pem +-rw-r--r-- 1 root root 1082 Jul 1 19:53 kube-apiserver-requestheader-ca.pem +-rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-apiserver.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-ca-key.pem +-rw-r--r-- 1 root root 1017 Jul 1 19:53 kube-ca.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-controller-manager-key.pem +-rw-r--r-- 1 root root 1062 Jul 1 19:53 kube-controller-manager.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-16-161-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-16-161.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-etcd-172-31-24-134-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-24-134.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-30-57-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-30-57.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-node-key.pem +-rw-r--r-- 1 root root 1070 Jul 1 19:53 kube-node.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-proxy-key.pem +-rw-r--r-- 1 root root 1046 Jul 1 19:53 kube-proxy.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-scheduler-key.pem +-rw-r--r-- 1 root root 1050 Jul 1 19:53 kube-scheduler.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-service-account-token-key.pem +-rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-service-account-token.pem +``` + +**Result:** Pass + +#### 1.4.20 - Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c "%n - %a" /etc/kubernetes/ssl/*.pem |grep -v key + +``` + +**Returned Value:** +``` bash +/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem - 640 +/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem - 640 +/etc/kubernetes/ssl/kube-apiserver.pem - 640 +/etc/kubernetes/ssl/kube-ca.pem - 640 +/etc/kubernetes/ssl/kube-controller-manager.pem - 640 
+/etc/kubernetes/ssl/kube-etcd-172-31-16-161.pem - 640 +/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem - 640 +/etc/kubernetes/ssl/kube-etcd-172-31-30-57.pem - 640 +/etc/kubernetes/ssl/kube-node.pem - 640 +/etc/kubernetes/ssl/kube-proxy.pem - 640 +/etc/kubernetes/ssl/kube-scheduler.pem - 640 +/etc/kubernetes/ssl/kube-service-account-token.pem - 640 +``` + +**Result:** Pass + +#### 1.4.21 - Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored) + +**Audit** + +``` bash +stat -c "%n - %a" /etc/kubernetes/ssl/*key* + +``` + +**Returned Value:** +``` bash +/etc/kubernetes/ssl/kube-apiserver-key.pem - 600 +/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem - 600 +/etc/kubernetes/ssl/kube-apiserver-requestheader-ca-key.pem - 600 +/etc/kubernetes/ssl/kube-ca-key.pem - 600 +/etc/kubernetes/ssl/kube-controller-manager-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-16-161-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-30-57-key.pem - 600 +/etc/kubernetes/ssl/kube-node-key.pem - 600 +/etc/kubernetes/ssl/kube-proxy-key.pem - 600 +/etc/kubernetes/ssl/kube-scheduler-key.pem - 600 +/etc/kubernetes/ssl/kube-service-account-token-key.pem - 600 +``` + +**Result:** Pass + +### 1.5 - etcd + +#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) + +**Audit** `(--cert-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' +``` + +**Note** +Certificate file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem` + +**Audit** (`--key-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' +``` + +**Note** +Key file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem` + +**Result:** Pass + +#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) + +**Notes** + +Setting "--client-cert-auth" is the equivalent of setting "--client-cert-auth=true". + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' +``` + +**Returned Value:** `--client-cert-auth` + +**Result:** Pass + +#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) + +**Audit** (`--peer-cert-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' +``` + +**Note** +Certificate file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` + +**Audit** (`--peer-key-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' +``` + +**Note** +Key file name may vary slightly, since it contains the IP of the etcd container. 
+ +**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` + +**Result:** Pass + +#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) + +**Notes** + +Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`. + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string' +``` + +**Returned Value:** `--peer-client-cert-auth` + +**Result:** Pass + +#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored) + +**Mitigation** + +RKE supports connecting to an external etcd cluster. This external cluster could be configured with its own discrete CA. + +**Notes** + +`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass (See Mitigation) + +#### 1.6 - General Security Primitives + +These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone. + +#### 1.6.1 - Ensure that the cluster-admin role is only used where required (Not Scored) + + +Rancher has built-in support for maintaining and enforcing Kubernetes RBAC on your workload clusters. + +Rancher's ability to integrate with external authentication sources (LDAP, SAML, AD…) allows easy access with unique credentials for your existing users or groups. + +#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored) + +With Rancher, users or groups can be assigned access to all clusters, a single cluster or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources. + +#### 1.6.3 - Create network segmentation using Network Policies (Not Scored) + +Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster. + +See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation. + +#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored) + +Since this requires enabling the `AllAlpha` feature gate, we do not recommend enabling this feature at the moment. + +#### 1.6.5 - Apply security context to your pods and containers (Not Scored) + +This practice does go against control 1.1.13, but we prefer using a PodSecurityPolicy and allowing security context to be set over a blanket deny. + +Rancher allows users to set various Security Context options when launching pods via the GUI interface. + +#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored) + +Image Policy Webhook requires a third-party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host Configuration section for the `admission.yaml` file.
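+If you do configure an `ImagePolicyWebhook` backend, a check in the same style as the audits above can confirm that the admission plugin is enabled. This is a sketch for your own verification and is not part of the benchmark scoring:
+
+``` bash
+# Check whether the ImagePolicyWebhook admission plugin has been enabled on the API server.
+docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ImagePolicyWebhook).*").captures[].string'
+```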
+ +#### 1.6.7 - Configure network policies as appropriate (Not Scored) + +Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. + +See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. + +#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) + +Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on controls. + +With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. + + +#### 1.7 - Pod Security Policies (PSP) + +This RKE configuration has two Pod Security Policies. + +- `default-psp`: assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. +- `restricted-psp`: This is the cluster default PSP and follows the best practices defined by controls in this section. + +#### 1.7.1 - Do not admit privileged containers (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.privileged}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.hostPID}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.hostIPC}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.hostNetwork}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" +``` + +**Returned Value:** `RunAsAny` + +**Result:** Pass + +#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. 
+ +**Audit** + +``` bash +kubectl get psp restricted-psp -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" +``` + +**Returned Value:** `null` + +**Result:** Pass + +## 2 - Worker Node Security Configuration + +### 2.1 - Kubelet + +#### 2.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' +``` + +**Returned Value:** `--anonymous-auth=false` + +**Result:** Pass + +#### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' +``` + +**Returned Value:** `--authorization-mode=Webhook` + +**Result:** Pass + +#### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' +``` + +**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 2.1.4 - Ensure that the `--read-only-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' +``` + +**Returned Value:** `--read-only-port=0` + +**Result:** Pass + +#### 2.1.5 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' +``` + +**Returned Value:** `--streaming-connection-idle-timeout=30m` + +**Result:** Pass + +#### 2.1.6 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--protect-kernel-defaults=true").string' +``` + +**Returned Value:** `--protect-kernel-defaults=true` + +**Result:** Pass + +#### 2.1.7 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' +``` + +**Returned Value:** `--make-iptables-util-chains=true` + +**Result:** Pass + +#### 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) + +**Notes** +This is used by most cloud providers. Not setting this is not practical in most cases. + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--hostname-override=.*").string' +``` + +**Returned Value:** `--hostname-override=` + +**Result:** Fail + +#### 2.1.9 - Ensure that the `--event-qps` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' +``` + +**Returned Value:** `--event-qps=0` + +**Result:** Pass + +#### 2.1.10 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Notes** + +RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). 
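+To confirm that this self-generated TLS material is present, a quick check (a sketch; file names can vary by node) is to list the default directory on the worker node:
+
+``` bash
+# List the kubelet's self-generated certificates and keys with their permissions.
+stat -c "%n - %a" /var/lib/kubelet/pki/*
+```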
+ +**Audit** (`--tls-cert-file`) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' +``` + +**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-172-31-40-84.pem` + +**Audit** (`--tls-private-key-file`) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' +``` + +**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-172-31-40-84-key.pem` + +**Result:** Pass + +#### 2.1.11 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 2.1.12 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) + +**Notes** + +RKE handles certificate rotation through an external process. + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' +``` + +**Returned Value:** `null` + +**Result:** Pass (Not Applicable) + +#### 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' +``` + +**Returned Value:** `RotateKubeletServerCertificate=true` + +**Result:** Pass + +#### 2.1.14 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) + +**Audit** (Allowed Ciphers) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** (Disallowed Ciphers) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | 
match("--tls-cipher-suites=.*(CBC).*").captures[].string' +``` + +**Returned Value:** `null` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +### 2.2 - Configuration Files + +#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) + +**Notes** + +This is the value of the `--kubeconfig` option. + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Returned Value:** `640` + +**Result:** Pass + +#### 2.2.2 - Ensure that the kubelet.conf file ownership is set to root:root (Scored) + +**Notes** + +This is the value of the `--kubeconfig` option. + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + + +#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Returned Value:** `640` + +**Result:** Pass + +#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kube-ca.pem +``` + +**Returned Value:** `640` + +**Result:** Pass + +#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) diff --git a/content/rancher/v2.x/en/security/benchmark-2.3/_index.md b/content/rancher/v2.x/en/security/benchmark-2.3/_index.md new file mode 100644 index 00000000000..7a0d5193ffb --- /dev/null +++ b/content/rancher/v2.x/en/security/benchmark-2.3/_index.md @@ -0,0 +1,1795 @@ +--- +title: CIS Benchmark Rancher Self-Assessment Guide v2.3 +weight: 103 +--- + +This document is a companion to the Rancher v2.3 security hardening guide. 
The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.3 | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes 1.15 | Benchmark v1.4.1 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Benchmark_Assessment.pdf) + +> The CIS Benchmark version v1.4.1 covers the security posture of Kubernetes 1.13 clusters. This self-assessment has been run against Kubernetes 1.15, using the guidelines outlined in the CIS v1.4.1 benchmark. Updates to the CIS benchmarks will be applied to this document as they are released. + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.4.1. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Scoring the commands is different in Rancher Labs than in the CIS Benchmark. Where the commands differ from the original CIS benchmark, the commands specific to Rancher Labs are provided for testing. + +When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the `jq` command to provide human-readable formatting. + +#### Known Scored Control Failures + +The following scored controls do not currently pass, and Rancher Labs is working towards addressing these through future enhancements to the product. 
+ +- 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) +- 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) + +### Controls + +--- + +## 1 - Master Node Security Configuration + +### 1.1 - API Server + +#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' +``` + +**Returned Value:** `--anonymous-auth=false` + +**Result:** Pass + +#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) + +**Notes** + +The flag should either not be set or be set to `--insecure-bind-address=127.0.0.1`. RKE sets this flag to `--insecure-bind-address=127.0.0.1`. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.6 - Ensure that the `--insecure-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' +``` + +**Returned Value:** `--insecure-port=0` + +**Result:** Pass + +#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' +``` + +**Returned Value:** `--secure-port=6443` + +**Result:** Pass + +#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) + +**Note:** This deprecated flag was removed in 1.14, so it cannot be set.
+ +**Result:** Pass + +#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' +``` + +**Returned Value:** `AlwaysPullImages` + +**Result:** Pass + +#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' +``` + +**Returned Value:** `DenyEscalatingExec` + +**Result:** Pass + +#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Not Scored) + +**Notes** + +This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the CIS Benchmark document: + +> This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies + +Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Document + +#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' +``` + +**Returned Value:** `NamespaceLifecycle` + +**Result:** Pass + +#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) + +**Notes** + +This is the path inside the container. It's combined with the RKE `cluster.yml` `extra_binds:` option to map the audit log to the host filesystem. + +Audit logs should be collected and shipped off-system to guarantee their integrity. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' +``` + +**Returned Value:** `--audit-log-path=/var/log/kube-audit/audit-log.json` + +**Result:** Pass + +#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxage=5` + +**Result:** Pass + +#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk.
+ +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxbackup=5` + +**Result:** Pass + +#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxsize=100` + +**Result:** Pass + +#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Audit** + +``` +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' +``` + +**Returned Value:** `--authorization-mode=Node,RBAC` + +**Result:** Pass + +#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) + +**Notes** + +RKE is using the kubelet's ability to automatically create self-signed certs. No CA cert is saved to verify the communication between `kube-apiserver` and `kubelet`. + +**Mitigation** + +Make sure nodes with `role:controlplane` are on the same local network as your nodes with `role:worker`. Use network ACLs to restrict connections to the kubelet port (10250/tcp) on worker nodes, only permitting it from controlplane nodes. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' +``` + +**Returned Value:** none + +**Result:** Fail (See Mitigation) + +#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) + +**Audit** (`--kubelet-client-certificate`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' +``` + +**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` + +**Audit** (`--kubelet-client-key`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' +``` + +**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` + +**Result:** Pass + +#### 1.1.23 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' +``` + +**Returned Value:** `--service-account-lookup=true` + +**Result:** Pass + +#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' +``` + +**Returned Value:** `PodSecurityPolicy` + +**Result:** Pass + +#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' +``` + +**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` + +**Result:** Pass + +#### 
1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) + +**Audit** (`--etcd-certfile`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' +``` + +**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` + +**Audit** (`--etcd-keyfile`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' +``` + +**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` + +**Result:** Pass + +#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' +``` + +**Returned Value:** `ServiceAccount` + +**Result:** Pass + +#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Audit** (`--tls-cert-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' +``` + +**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` + +**Audit** (`--tls-key-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' +``` + +**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` + +**Result:** Pass + +#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' +``` + +**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) + +**Audit** (Allowed Ciphers) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | 
match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** (Disallowed Ciphers) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' +``` + +**Returned Value:** `null` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' +``` + +**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.1.32 - Ensure that the `--authorization-mode` argument includes Node (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' +``` + +**Returned Value:** `--authorization-mode=Node,RBAC` + +**Result:** Pass + +#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' +``` + +**Returned Value:** `NodeRestriction` + +**Result:** Pass + +#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) + +**Notes** +In Kubernetes 1.15.x this flag is `--encryption-provider-config` + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--encryption-provider-config=.*").string' +``` + +**Returned Value:** `encryption-provider-config=/opt/kubernetes/encryption.yaml` + +**Result:** Pass + +#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) + +**Notes** + +Only the first provider in the list is active. + +**Audit** + +``` bash +grep -A 1 providers: /opt/kubernetes/encryption.yaml | grep aescbc +``` + +**Returned Value:** `- aescbc:` + +**Result:** Pass + +#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Notes** + +The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: + +- `/opt/kubernetes/admission.yaml` +- `/opt/kubernetes/event.yaml` + +See Host Configuration for details. + +**Audit** (Admissions plugin) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' +``` + +**Returned Value:** `EventRateLimit` + +**Audit** (`--admission-control-config-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' +``` + +**Returned Value:** `--admission-control-config-file=/opt/kubernetes/admission.yaml` + +**Result:** Pass + +#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) + +**Notes** + +`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. 
+ +**Audit** (Feature Gate) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' +``` + +**Returned Value:** `null` + +**Audit** (Audit Policy File) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' +``` + +**Returned Value:** `--audit-policy-file=/opt/kubernetes/audit.yaml` + +**Result:** Pass + +#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) + +**Notes** + +RKE uses the default value of 60s and doesn't set this option. Tuning this value is specific to the environment. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### Ensure that the --authorization-mode argument includes RBAC (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=.*").string' +``` + +**Returned Value:** `"--authorization-mode=Node,RBAC"` + +**Result:** Pass + +### 1.2 - Scheduler + +#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) + +**Audit** + +``` bash +docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +``` bash +docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' +``` + +**Returned Value:** `--address=127.0.0.1` + +**Result:** Pass + +### 1.3 - Controller Manager + +#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' +``` + +**Returned Value:** `--terminated-pod-gc-threshold=1000` + +**Result:** Pass + +#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' +``` + +**Returned Value:** `--use-service-account-credentials=true` + +**Result:** Pass + +#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' +``` + +**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` + +**Result:** Pass + +#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' +``` + +**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) + +**Notes** + +RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. 
+ +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' +``` + +**Returned Value:** `RotateKubeletServerCertificate=true` + +**Result:** Pass + +#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' +``` + +**Returned Value:** `--address=127.0.0.1` + +**Result:** Pass + +### 1.4 - Configuration Files + +#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) + +**Notes** + +This is a manual check. + +**Audit** (`/var/lib/cni/networks/k8s-pod-network`) + +**Note** +This may return a lockfile. Permissions on this file do not need to be as restrictive as the CNI files. 
+ +``` bash +stat -c "%n - %a" /var/lib/cni/networks/k8s-pod-network/* +``` + +**Returned Value:** + +``` bash +/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - 644 +/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - 644 +/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - 644 +/var/lib/cni/networks/k8s-pod-network/lock - 750 +``` + +**Audit** (`/etc/cni/net.d`) + +``` bash +stat -c "%n - %a" /etc/cni/net.d/* +``` + +**Returned Value:** + +``` bash +/etc/cni/net.d/10-canal.conflist - 664 +/etc/cni/net.d/calico-kubeconfig - 600 +``` + +**Result:** Pass + +#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored) + +**Notes** + +This is a manual check. + +**Audit** (`/var/lib/cni/networks/k8s-pod-network`) + +``` bash +stat -c "%n - %U:%G" /var/lib/cni/networks/k8s-pod-network/* +``` + +**Returned Value:** + +``` bash +/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - root:root +/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - root:root +/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - root:root +/var/lib/cni/networks/k8s-pod-network/lock - root:root +``` + +**Audit** (`/etc/cni/net.d`) + +``` bash +stat -c "%n - %U:%G" /etc/cni/net.d/* +``` + +**Returned Value:** + +``` bash +/etc/cni/net.d/10-canal.conflist - root:root +/etc/cni/net.d/calico-kubeconfig - root:root +``` + +**Result:** Pass + +#### 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) + +**Notes** + +Files underneath the data directory have permissions set to `700`: + +``` bash +stat -c "%n - %a" /var/lib/rancher/etcd/* + +/var/lib/rancher/etcd/member - 700 +``` + +**Audit** + +``` bash +stat -c %a /var/lib/rancher/etcd +``` + +**Returned Value:** `700` + +**Result:** Pass + +#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored) + +**Notes** + +The `etcd` container runs as the `etcd` user. The data directory and files are owned by `etcd`. + +**Audit** + +``` bash +stat -c %U:%G /var/lib/rancher/etcd +``` + +**Returned Value:** `etcd:etcd` + +**Result:** Pass + +#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored) + +**Notes** + +RKE does not store the default Kubernetes kubeconfig credentials file on the nodes. It's presented to the user on the device where RKE is run. We recommend that this `kube_config_cluster.yml` file be kept in a secure store. + +**Result:** Pass (Not Applicable) + +#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored) + +**Notes** + +RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in a secure store.
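+
+For example (not part of the scored audit), on the device where `rke` was run you can restrict the generated `kube_config_cluster.yml` and confirm the result; the path below assumes the file is in the current directory:
+
+``` bash
+# Limit access to the generated kubeconfig to the file owner only
+chmod 600 kube_config_cluster.yml
+
+# Confirm the resulting permissions and ownership
+stat -c "%a %U:%G" kube_config_cluster.yml
+```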
+ +**Result:** Pass (Not Applicable) + +#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 1.4.19 - Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored) + +**Audit** + +``` bash +ls -laR /etc/kubernetes/ssl/ |grep -v yaml + +``` + +**Returned Value:** +``` bash +total 128 +drwxr-xr-x 2 root root 4096 Jul 1 19:53 . +drwxr-xr-x 4 root root 4096 Jul 1 19:53 .. +-rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-key.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-proxy-client-key.pem +-rw-r--r-- 1 root root 1107 Jul 1 19:53 kube-apiserver-proxy-client.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-apiserver-requestheader-ca-key.pem +-rw-r--r-- 1 root root 1082 Jul 1 19:53 kube-apiserver-requestheader-ca.pem +-rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-apiserver.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-ca-key.pem +-rw-r--r-- 1 root root 1017 Jul 1 19:53 kube-ca.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-controller-manager-key.pem +-rw-r--r-- 1 root root 1062 Jul 1 19:53 kube-controller-manager.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-16-161-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-16-161.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-etcd-172-31-24-134-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-24-134.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-30-57-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-30-57.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-node-key.pem +-rw-r--r-- 1 root root 1070 Jul 1 19:53 kube-node.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-proxy-key.pem +-rw-r--r-- 1 root root 1046 Jul 1 19:53 kube-proxy.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-scheduler-key.pem +-rw-r--r-- 1 root root 1050 Jul 1 19:53 kube-scheduler.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-service-account-token-key.pem +-rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-service-account-token.pem +``` + +**Result:** Pass + +#### 1.4.20 - Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c "%n - %a" /etc/kubernetes/ssl/*.pem |grep -v key + +``` + +**Returned Value:** +``` bash +/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem - 644 +/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem - 644 +/etc/kubernetes/ssl/kube-apiserver.pem - 644 +/etc/kubernetes/ssl/kube-ca.pem - 644 +/etc/kubernetes/ssl/kube-controller-manager.pem - 644 
+/etc/kubernetes/ssl/kube-etcd-172-31-16-161.pem - 644 +/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem - 644 +/etc/kubernetes/ssl/kube-etcd-172-31-30-57.pem - 644 +/etc/kubernetes/ssl/kube-node.pem - 644 +/etc/kubernetes/ssl/kube-proxy.pem - 644 +/etc/kubernetes/ssl/kube-scheduler.pem - 644 +/etc/kubernetes/ssl/kube-service-account-token.pem - 644 +``` + +**Result:** Pass + +#### 1.4.21 - Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored) + +**Audit** + +``` bash +stat -c "%n - %a" /etc/kubernetes/ssl/*key* + +``` + +**Returned Value:** +``` bash +/etc/kubernetes/ssl/kube-apiserver-key.pem - 600 +/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem - 600 +/etc/kubernetes/ssl/kube-apiserver-requestheader-ca-key.pem - 600 +/etc/kubernetes/ssl/kube-ca-key.pem - 600 +/etc/kubernetes/ssl/kube-controller-manager-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-16-161-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-30-57-key.pem - 600 +/etc/kubernetes/ssl/kube-node-key.pem - 600 +/etc/kubernetes/ssl/kube-proxy-key.pem - 600 +/etc/kubernetes/ssl/kube-scheduler-key.pem - 600 +/etc/kubernetes/ssl/kube-service-account-token-key.pem - 600 +``` + +**Result:** Pass + +### 1.5 - etcd + +#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) + +**Audit** `(--cert-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' +``` + +**Note** +Certificate file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem` + +**Audit** (`--key-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' +``` + +**Note** +Key file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem` + +**Result:** Pass + +#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) + +**Notes** + +Setting "--client-cert-auth" is the equivalent of setting "--client-cert-auth=true". + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' +``` + +**Returned Value:** `--client-cert-auth` + +**Result:** Pass + +#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) + +**Audit** (`--peer-cert-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' +``` + +**Note** +Certificate file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` + +**Audit** (`--peer-key-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' +``` + +**Note** +Key file name may vary slightly, since it contains the IP of the etcd container. 
+ +**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` + +**Result:** Pass + +#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) + +**Notes** + +Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`. + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string' +``` + +**Returned Value:** `--peer-client-cert-auth` + +**Result:** Pass + +#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored) + +**Mitigation** + +RKE supports connecting to an external etcd cluster. This external cluster could be configured with its own discrete CA. + +**Notes** + +`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass (See Mitigation) + +#### 1.6 - General Security Primitives + +These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone. + +#### 1.6.1 - Ensure that the cluster-admin role is only used where required (Not Scored) + +Rancher has built-in support for maintaining and enforcing Kubernetes RBAC on your workload clusters. + +Rancher's ability to integrate with external authentication sources (LDAP, SAML, AD…) allows easy access with unique credentials for your existing users or groups. + +#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored) + +With Rancher, users or groups can be assigned access to all clusters, a single cluster or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources. + +#### 1.6.3 - Create network segmentation using Network Policies (Not Scored) + +Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster. + +See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation. + +#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored) + +Because this requires enabling the `AllAlpha` feature gate, we do not recommend enabling this feature at the moment. + +#### 1.6.5 - Apply security context to your pods and containers (Not Scored) + +This practice runs counter to control 1.1.13, but we prefer using a PodSecurityPolicy and allowing a security context to be set over a blanket deny. + +Rancher allows users to set various Security Context options when launching pods via the GUI. + +#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored) + +The Image Policy Webhook requires a third-party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host Configuration section for the `admission.yaml` file.
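+
+If you do adopt an image provenance service, a check in the same style as the scored audits above can confirm whether the plugin is enabled. This is illustrative only; the RKE configuration described in this guide does not include `ImagePolicyWebhook` in `--enable-admission-plugins`, so the command would be expected to return `null`.
+
+``` bash
+docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ImagePolicyWebhook).*").captures[].string'
+```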
+ +#### 1.6.7 - Configure network policies as appropriate (Not Scored) + +Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. + +See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. + +#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) + +Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on controls. + +With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. + + +#### 1.7 - Pod Security Policies (PSP) + +This RKE configuration has two Pod Security Policies. + +- `default-psp`: assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. +- `restricted`: This is the cluster default PSP and follows the best practices defined by controls in this section. + +#### 1.7.1 - Do not admit privileged containers (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.privileged}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.hostPID}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.hostIPC}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.hostNetwork}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. 
+ +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" +``` + +**Returned Value:** `[NET_RAW]` + +**Result:** Pass + +## 2 - Worker Node Security Configuration + +### 2.1 - Kubelet + +#### 2.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' +``` + +**Returned Value:** `--anonymous-auth=false` + +**Result:** Pass + +#### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' +``` + +**Returned Value:** `--authorization-mode=Webhook` + +**Result:** Pass + +#### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' +``` + +**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 2.1.4 - Ensure that the `--read-only-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' +``` + +**Returned Value:** `--read-only-port=0` + +**Result:** Pass + +#### 2.1.5 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' +``` + +**Returned Value:** `--streaming-connection-idle-timeout=1800s` + +**Result:** Pass + +#### 2.1.6 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--protect-kernel-defaults=true").string' +``` + +**Returned Value:** `--protect-kernel-defaults=true` + +**Result:** Pass + +#### 2.1.7 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' +``` + +**Returned Value:** `--make-iptables-util-chains=true` + +**Result:** Pass + +#### 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) + +**Notes** +This is used by most cloud providers. Not setting this is not practical in most cases. + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--hostname-override=.*").string' +``` + +**Returned Value:** `--hostname-override=` + +**Result:** Fail + +#### 2.1.9 - Ensure that the `--event-qps` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' +``` + +**Returned Value:** `--event-qps=0` + +**Result:** Pass + +#### 2.1.10 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Notes** + +RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). 
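+
+As an optional sanity check (not part of the benchmark), you can list that directory on a worker node to confirm the self-generated serving certificate and key are present; exact file names can vary by kubelet version:
+
+``` bash
+ls -l /var/lib/kubelet/pki/
+```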
+ +**Audit** (`--tls-cert-file`) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' +``` + +**Returned Value:** `null` + +**Audit** (`--tls-private-key-file`) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 2.1.11 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 2.1.12 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) + +**Notes** + +RKE handles certificate rotation through an external process. + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' +``` + +**Returned Value:** `null` + +**Result:** Pass (Not Applicable) + +#### 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' +``` + +**Returned Value:** `RotateKubeletServerCertificate=true` + +**Result:** Pass + +#### 2.1.14 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) + +**Audit** (Allowed Ciphers) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** (Disallowed Ciphers) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' +``` + +**Returned Value:** `null` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | 
match("--tls-cipher-suites=.*(RC4).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +### 2.2 - Configuration Files + +#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) + +**Notes** + +This is the value of the `--kubeconfig` option. + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 2.2.2 - Ensure that the kubelet.conf file ownership is set to root:root (Scored) + +**Notes** + +This is the value of the `--kubeconfig` option. + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + + +#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kube-ca.pem +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) diff --git a/content/rancher/v2.x/en/security/hardening-2.1/_index.md b/content/rancher/v2.x/en/security/hardening-2.1/_index.md index 07c9338593a..6c65b6b757c 100644 --- a/content/rancher/v2.x/en/security/hardening-2.1/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.1/_index.md @@ -1,16 +1,20 @@ --- -title: Hardening Guide - Rancher v2.1.x +title: Hardening Guide v2.1 weight: 102 --- -### Hardening Guide for Rancher 2.1.x with Kubernetes 1.11 +This document provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. 
It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Hardening_Guide.pdf) -### Overview - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.1/). ### Profile Definitions @@ -366,8 +370,8 @@ To pass the following controls in the CIS benchmark, ensure the appropriate flag Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - `--streaming-connection-idle-timeout=` -- `--protect-kernel-defaults=false` -- `--make-iptables-util-chains=false` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` - `--event-qps=0` **Remediation** @@ -866,11 +870,11 @@ Upgrade the Rancher server installation using Helm, and configure the audit log #### Reference -- +- ## 3.2 - Rancher Management Control Plane Authentication -### 3.2.1 - Change the local admin password from the default value +### 3.2.1 - Change the local administrator password from the default value **Profile Applicability** @@ -878,11 +882,11 @@ Upgrade the Rancher server installation using Helm, and configure the audit log **Description** -The local admin password should be changed from the default. +The local administrator password should be changed from the default. **Rationale** -The default admin password is common across all Rancher installations and should be changed immediately upon startup. +The default administrator password is common across all Rancher installations and should be changed immediately upon startup. **Audit** diff --git a/content/rancher/v2.x/en/security/hardening-2.2/_index.md b/content/rancher/v2.x/en/security/hardening-2.2/_index.md index 83c191002b0..1d45f862926 100644 --- a/content/rancher/v2.x/en/security/hardening-2.2/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.2/_index.md @@ -1,16 +1,20 @@ --- -title: Hardening Guide - Rancher v2.2.x +title: Hardening Guide v2.2 weight: 101 --- -### Hardening Guide for Rancher 2.2.x with Kubernetes 1.13 +This document provides prescriptive guidance for hardening a production installation of Rancher v2.2.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. 
+ +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Hardening_Guide.pdf) -### Overview - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.2.x with Kubernetes v1.13. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.2/). ### Profile Definitions @@ -115,7 +119,7 @@ This supports the following controls: On the control plane hosts for the Rancher HA cluster run: ``` bash -stat /etc/kubernetes/encryption.yaml +stat /opt/kubernetes/encryption.yaml ``` Ensure that: @@ -147,14 +151,14 @@ Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 ``` bash head -c 32 /dev/urandom | base64 -i - -touch /etc/kubernetes/encryption.yaml +touch /opt/kubernetes/encryption.yaml ``` - Set the file ownership to `root:root` and the permissions to `0600` ``` bash -chown root:root /etc/kubernetes/encryption.yaml -chmod 0600 /etc/kubernetes/encryption.yaml +chown root:root /opt/kubernetes/encryption.yaml +chmod 0600 /opt/kubernetes/encryption.yaml ``` - Set the contents to: @@ -175,6 +179,10 @@ resources: Where `secret` is the 32-byte base64-encoded string generated in the first step. +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + ### 1.1.3 - Install the audit log configuration on all control plane nodes. **Profile Applicability** @@ -202,7 +210,7 @@ This supports the following controls: On each control plane node, run: ``` bash -stat /etc/kubernetes/audit.yaml +stat /opt/kubernetes/audit.yaml ``` Ensure that: @@ -226,14 +234,14 @@ On nodes with the `controlplane` role: - Generate an empty configuration file: ``` bash -touch /etc/kubernetes/audit.yaml +touch /opt/kubernetes/audit.yaml ``` - Set the file ownership to `root:root` and the permissions to `0600` ``` bash -chown root:root /etc/kubernetes/audit.yaml -chmod 0600 /etc/kubernetes/audit.yaml +chown root:root /opt/kubernetes/audit.yaml +chmod 0600 /opt/kubernetes/audit.yaml ``` - Set the contents to: @@ -245,6 +253,10 @@ rules: - level: Metadata ``` +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. 
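+
+As a quick, non-scored check (assuming the control plane container is named `kube-apiserver`, as it is in RKE-provisioned clusters), you can confirm the bind mount is present after running `rke up`:
+
+``` bash
+docker inspect kube-apiserver | jq -e '.[0].HostConfig.Binds[] | match("/opt/kubernetes").string'
+```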
+ ### 1.1.4 - Place Kubernetes event limit configuration on each control plane host **Profile Applicability** @@ -268,8 +280,8 @@ This supports the following control: On nodes with the `controlplane` role run: ``` bash -stat /etc/kubernetes/admission.yaml -stat /etc/kubernetes/event.yaml +stat /opt/kubernetes/admission.yaml +stat /opt/kubernetes/event.yaml ``` For each file, ensure that: @@ -285,7 +297,7 @@ apiVersion: apiserver.k8s.io/v1alpha1 kind: AdmissionConfiguration plugins: - name: EventRateLimit - path: /etc/kubernetes/event.yaml + path: /opt/kubernetes/event.yaml ``` For `event.yaml` ensure that the file contains: @@ -306,17 +318,17 @@ On nodes with the `controlplane` role: - Generate an empty configuration file: ``` bash -touch /etc/kubernetes/admission.yaml -touch /etc/kubernetes/event.yaml +touch /opt/kubernetes/admission.yaml +touch /opt/kubernetes/event.yaml ``` - Set the file ownership to `root:root` and the permissions to `0600` ``` bash -chown root:root /etc/kubernetes/admission.yaml -chown root:root /etc/kubernetes/event.yaml -chmod 0600 /etc/kubernetes/admission.yaml -chmod 0600 /etc/kubernetes/event.yaml +chown root:root /opt/kubernetes/admission.yaml +chown root:root /opt/kubernetes/event.yaml +chmod 0600 /opt/kubernetes/admission.yaml +chmod 0600 /opt/kubernetes/event.yaml ``` - For `admission.yaml` set the contents to: @@ -326,7 +338,7 @@ apiVersion: apiserver.k8s.io/v1alpha1 kind: AdmissionConfiguration plugins: - name: EventRateLimit - path: /etc/kubernetes/event.yaml + path: /opt/kubernetes/event.yaml ``` - For `event.yaml` set the contents to: @@ -340,6 +352,10 @@ limits: burst: 20000 ``` +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + ## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE (See Appendix A. for full RKE `cluster.yml` example) @@ -359,6 +375,7 @@ Ensure Kubelet options are configured to match CIS controls. To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) - 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) @@ -371,8 +388,9 @@ To pass the following controls in the CIS benchmark, ensure the appropriate flag Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - `--streaming-connection-idle-timeout=` -- `--protect-kernel-defaults=false` -- `--make-iptables-util-chains=false` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` - `--event-qps=0` - `--anonymous-auth=false` - `--feature-gates="RotateKubeletServerCertificate=true"` @@ -386,6 +404,7 @@ Inspect the Kubelet containers on all hosts and verify that they are running wit services: kubelet: extra_args: + authorization-mode: "Webhook" streaming-connection-idle-timeout: "" protect-kernel-defaults: "true" make-iptables-util-chains: "true" @@ -454,14 +473,14 @@ To pass the following controls for the kube-api server ensure RKE configuration --profiling=false --service-account-lookup=true --enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---encryption-provider-config=/etc/kubernetes/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml +--encryption-provider-config=/opt/kubernetes/encryption.yaml +--admission-control-config-file=/opt/kubernetes/admission.yaml --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxage=5 --audit-log-maxbackup=5 --audit-log-maxsize=100 --audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit.yaml +--audit-policy-file=/opt/kubernetes/audit.yaml --tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" ``` @@ -484,17 +503,18 @@ services: profiling: "false" service-account-lookup: "true" enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /etc/kubernetes/encryption.yaml - admission-control-config-file: "/etc/kubernetes/admission.yaml" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" audit-log-path: "/var/log/kube-audit/audit-log.json" audit-log-maxage: "5" audit-log-maxbackup: "5" audit-log-maxsize: "100" audit-log-format: "json" - audit-policy-file: /etc/kubernetes/audit.yaml + audit-policy-file: /opt/kubernetes/audit.yaml tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" extra_binds: - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" ``` - Reconfigure the cluster: @@ -503,6 +523,10 @@ services: rke up --config cluster.yml ``` +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + ### 2.1.3 - Configure scheduler options **Profile Applicability** @@ -889,11 +913,11 @@ Upgrade the Rancher server installation using Helm, and configure the audit log #### Reference -- +- ## 3.2 - Rancher Management Control Plane Authentication -### 3.2.1 - Change the local admin password from the default value +### 3.2.1 - Change the local administrator password from the default value **Profile Applicability** @@ -901,11 +925,11 @@ Upgrade the Rancher server installation using Helm, and configure the audit log **Description** -The local admin password should be changed from the default. +The local administrator password should be changed from the default. **Rationale** -The default admin password is common across all Rancher installations and should be changed immediately upon startup. +The default administrator password is common across all Rancher installations and should be changed immediately upon startup. **Audit** @@ -1035,6 +1059,7 @@ services: kubelet: extra_args: streaming-connection-idle-timeout: "1800s" + authorization-mode: "Webhook" protect-kernel-defaults: "true" make-iptables-util-chains: "true" event-qps: "0" @@ -1048,17 +1073,18 @@ services: profiling: "false" service-account-lookup: "true" enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /etc/kubernetes/encryption.yaml - admission-control-config-file: "/etc/kubernetes/admission.yaml" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" audit-log-path: "/var/log/kube-audit/audit-log.json" audit-log-maxage: "5" audit-log-maxbackup: "5" audit-log-maxsize: "100" audit-log-format: "json" - audit-policy-file: /etc/kubernetes/audit.yaml + audit-policy-file: /opt/kubernetes/audit.yaml tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" extra_binds: - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" scheduler: extra_args: profiling: "false" diff --git a/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md b/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md new file mode 100644 index 00000000000..b001262a839 --- /dev/null +++ b/content/rancher/v2.x/en/security/hardening-2.3.3/_index.md @@ -0,0 +1,2042 @@ +--- +title: Hardening Guide v2.3.3 +weight: 100 +--- + +This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.3. 
It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Hardening_Guide.pdf) + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3.3/). + +### Profile Definitions + +The following profile definitions agree with the CIS benchmarks for Kubernetes. + +A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. + +#### Level 1 + +Items in this profile intend to: + +- offer practical advice appropriate for the environment; +- deliver an obvious security benefit; and +- not alter the functionality or utility of the environment beyond an acceptable margin + +#### Level 2 + +Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: + +- are intended for use in environments or use cases where security is paramount +- act as a defense in depth measure +- may negatively impact the utility or performance of the technology + +--- + +## 1.1 - Rancher RKE Kubernetes cluster host configuration + +(See Appendix A. for full ubuntu `cloud-config` example) + +### 1.1.1 - Configure default sysctl settings on all hosts + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure sysctl settings to match what the kubelet would set if allowed. + +**Rationale** + +We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. + +This supports the following control: + +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) + +**Audit** + +- Verify `vm.overcommit_memory = 1` + +``` bash +sysctl vm.overcommit_memory +``` + +- Verify `vm.panic_on_oom = 0` + +``` bash +sysctl vm.panic_on_oom +``` + +- Verify `kernel.panic = 10` + +``` bash +sysctl kernel.panic +``` + +- Verify `kernel.panic_on_oops = 1` + +``` bash +sysctl kernel.panic_on_oops +``` + +- Verify `kernel.keys.root_maxkeys = 1000000` + +``` bash +sysctl kernel.keys.root_maxkeys +``` + +- Verify `kernel.keys.root_maxbytes = 25000000` + +``` bash +sysctl kernel.keys.root_maxbytes +``` + +**Remediation** + +- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: + +``` plain +vm.overcommit_memory=1 +vm.panic_on_oom=0 +kernel.panic=10 +kernel.panic_on_oops=1 +kernel.keys.root_maxkeys=1000000 +kernel.keys.root_maxbytes=25000000 +``` + +- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. 
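+
+As a quick spot check (a sketch only, not part of the CIS audit procedure), the six expected values can be compared in one pass:
+
+``` bash
+# Compare each kubelet-related sysctl against the value this guide expects
+for kv in vm.overcommit_memory=1 vm.panic_on_oom=0 kernel.panic=10 \
+          kernel.panic_on_oops=1 kernel.keys.root_maxkeys=1000000 \
+          kernel.keys.root_maxbytes=25000000; do
+  key="${kv%%=*}"; want="${kv#*=}"
+  have="$(sysctl -n "$key")"
+  if [ "$have" = "$want" ]; then echo "OK   $key=$have"; else echo "FAIL $key=$have (expected $want)"; fi
+done
+```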
+ +### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory has permissions of 700 or more restrictive. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. + +**Audit** + +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %a /var/lib/etcd +``` + +Verify that the permissions are `700` or more restrictive. + +**Remediation** + +Follow the steps as documented in [1.4.12]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.3.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. + +### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory ownership is set to `etcd:etcd`. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. + +**Audit** + +On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %U:%G /var/lib/etcd +``` + +Verify that the ownership is set to `etcd:etcd`. + +**Remediation** + +- On the etcd server node(s) add the `etcd` user: + +``` bash +useradd -c "Etcd user" -d /var/lib/etcd etcd +``` + +Record the uid/gid: + +``` bash +id etcd +``` + +- Add the following to the RKE `cluster.yml` etcd section under `services`: + +``` yaml +services: + etcd: + uid: + gid: +``` + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix B. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
+ +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
+ +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy +--encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml +--admission-control-config-file=/etc/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=30 +--audit-log-maxbackup=10 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/etc/kubernetes/audit-policy.yaml +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" +``` + +For k8s 1.14 `enable-admission-plugins` should be + +``` yaml + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 2.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole restricted-clusterrole +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding restricted-clusterrolebinding +``` + +- Verify the restricted PSP is present. 
+ +``` bash +kubectl get psp restricted-psp +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted-psp + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: restricted-clusterrole + rules: + - apiGroups: + - extensions + resourceNames: + - restricted-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: restricted-clusterrolebinding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: restricted-clusterrole + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. 
+ +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. + +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- While upgrading or installing Rancher 2.3.3 or above, provide the following flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local admin password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local admin password should be changed from the default. + +**Rationale** + +The default admin password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. + +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. 
+ +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. + +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. + +## 4.1 - Rancher Kubernetes Custom Cluster Configuration via RKE + +(See Appendix C. for full RKE template example) + +### 4.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
+ +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 4.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
+ +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy +--encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml +--admission-control-config-file=/etc/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=30 +--audit-log-maxbackup=10 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/etc/kubernetes/audit-policy.yaml +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" +``` + +For k8s 1.14 `enable-admission-plugins` should be + +``` yaml + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 4.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 4.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 4.1.5 - Check PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole restricted-clusterrole +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +``` + +- Verify the restricted PSP is present. + +``` bash +kubectl get psp restricted-psp +``` + +--- + +## Appendix A - Complete ubuntu `cloud-config` Example + +`cloud-config` file to automate hardening manual steps on nodes deployment. 
+ +``` +#cloud-config +bootcmd: +- apt-get update +- apt-get install -y apt-transport-https +apt: + sources: + docker: + source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" + keyid: 0EBFCD88 +packages: +- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] +- jq +write_files: +# 1.1.1 - Configure default sysctl settings on all hosts +- path: /etc/sysctl.d/90-kubelet.conf + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxkeys=1000000 + kernel.keys.root_maxbytes=25000000 +# 1.4.12 etcd user +groups: + - etcd +users: + - default + - name: etcd + gecos: Etcd user + primary_group: etcd + homedir: /var/lib/etcd +# 1.4.11 etcd data dir +runcmd: + - chmod 0700 /var/lib/etcd + - usermod -G docker -a ubuntu + - sysctl -p /etc/sysctl.d/90-kubelet.conf +``` + +## Appendix B - Complete RKE `cluster.yml` Example + +Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. + +{{% accordion id="cluster-1.14" label="RKE yaml for k8s 1.14" %}} + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +addon_job_timeout: 30 +authentication: + strategy: x509 +authorization: {} +bastion_host: + ssh_agent_auth: false +cloud_provider: {} +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. 
+# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.14.9-rancher1-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +restore: + restore: false +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube-api: + always_pull_images: true + audit_log: + enabled: true + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: 'false' + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube-controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + protect-kernel-defaults: 'true' + fail_swap_on: false + generate_serving_certificate: true + kubeproxy: {} + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' +ssh_agent_auth: false +``` + +{{% /accordion %}} + +{{% accordion id="cluster-1.15" label="RKE yaml for k8s 1.15" %}} + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. 
+# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.15.6-rancher1-2 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +ssh_agent_auth: false +``` + +{{% /accordion %}} + +{{% accordion id="cluster-1.16" label="RKE yaml for k8s 1.16" %}} + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. 
+# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.16.3-rancher1-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +ssh_agent_auth: false +``` + +{{% /accordion %}} + +## Appendix C - Complete RKE Template Example + +Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. 
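+
+As an illustration only, assuming `id etcd` on the etcd nodes reports uid and gid `1000` (the values used in the templates below), the relevant section of the template would look like:
+
+``` yaml
+rancher_kubernetes_engine_config:
+  services:
+    etcd:
+      uid: 1000   # replace with the uid reported by `id etcd` on the etcd nodes
+      gid: 1000   # replace with the gid reported by `id etcd` on the etcd nodes
+```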
+ + +{{% accordion id="k8s-1.14" label="RKE template for k8s 1.14" %}} + +``` yaml +# +# Cluster Config +# +answers: {} +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: false +name: test-35378 +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + authentication: + strategy: x509 + authorization: {} + bastion_host: + ssh_agent_auth: false + cloud_provider: {} + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.14.9-rancher1-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal + restore: + restore: false +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube-api: + always_pull_images: true + audit_log: + enabled: true + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: 'false' + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube-controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + protect-kernel-defaults: 'true' + fail_swap_on: false + generate_serving_certificate: true + kubeproxy: {} + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +{{% /accordion %}} + +{{% accordion id="k8s-1.15" label="RKE template for k8s 1.15" %}} + +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker 
+enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.6-rancher1-2 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +{{% /accordion %}} + +{{% accordion id="k8s-1.16" label="RKE template for k8s 1.16" %}} + +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false 
+enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.16.3-rancher1-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +{{% /accordion %}} diff --git a/content/rancher/v2.x/en/security/hardening-2.3/_index.md b/content/rancher/v2.x/en/security/hardening-2.3/_index.md new file mode 100644 index 00000000000..30b9a6f8361 --- /dev/null +++ b/content/rancher/v2.x/en/security/hardening-2.3/_index.md @@ -0,0 +1,1546 @@ +--- +title: Hardening Guide v2.3 +weight: 100 
+---
+This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.0-v2.3.2. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
+
+> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes.
+
+This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
+
+Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version
+------------------------|----------------|-----------------------|------------------
+Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15
+
+[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf)
+
+For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3/).
+
+### Profile Definitions
+
+The following profile definitions agree with the CIS benchmarks for Kubernetes.
+
+A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance.
+
+#### Level 1
+
+Items in this profile intend to:
+
+- offer practical advice appropriate for the environment;
+- deliver an obvious security benefit; and
+- not alter the functionality or utility of the environment beyond an acceptable margin.
+
+#### Level 2
+
+Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics:
+
+- are intended for use in environments or use cases where security is paramount
+- act as a defense in depth measure
+- may negatively impact the utility or performance of the technology
+
+---
+
+## 1.1 - Rancher HA Kubernetes cluster host configuration
+
+(See Appendix A for a complete Ubuntu `cloud-config` example.)
+
+### 1.1.1 - Configure default sysctl settings on all hosts
+
+**Profile Applicability**
+
+- Level 1
+
+**Description**
+
+Configure sysctl settings to match what the kubelet would set if allowed.
+
+**Rationale**
+
+We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually.
+
+This supports the following control:
+
+- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored)
+
+**Audit**
+
+- Verify `vm.overcommit_memory = 1`
+
+``` bash
+sysctl vm.overcommit_memory
+```
+
+- Verify `vm.panic_on_oom = 0`
+
+``` bash
+sysctl vm.panic_on_oom
+```
+
+- Verify `kernel.panic = 10`
+
+``` bash
+sysctl kernel.panic
+```
+
+- Verify `kernel.panic_on_oops = 1`
+
+``` bash
+sysctl kernel.panic_on_oops
+```
+
+- Verify `kernel.keys.root_maxkeys = 1000000`
+
+``` bash
+sysctl kernel.keys.root_maxkeys
+```
+
+- Verify `kernel.keys.root_maxbytes = 25000000`
+
+``` bash
+sysctl kernel.keys.root_maxbytes
+```
+
+**Remediation**
+
+- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes:
+
+``` plain
+vm.overcommit_memory=1
+vm.panic_on_oom=0
+kernel.panic=10
+kernel.panic_on_oops=1
+kernel.keys.root_maxkeys=1000000
+kernel.keys.root_maxbytes=25000000
+```
+
+- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
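+
+The audit commands above can also be run as a single pass; the following is only an illustrative sketch and assumes a standard `sysctl` binary:
+
+``` bash
+# Compare each kubelet-related sysctl value against the hardened setting
+for kv in vm.overcommit_memory=1 vm.panic_on_oom=0 kernel.panic=10 kernel.panic_on_oops=1 kernel.keys.root_maxkeys=1000000 kernel.keys.root_maxbytes=25000000; do
+  key="${kv%%=*}"
+  want="${kv#*=}"
+  have="$(sysctl -n "$key")"
+  if [ "$have" = "$want" ]; then
+    echo "OK   ${key}=${have}"
+  else
+    echo "FAIL ${key}=${have} (expected ${want})"
+  fi
+done
+```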
+ +### 1.1.2 - Install the encryption provider configuration on all control plane nodes + +**Profile Applicability** + +- Level 1 + +**Description** + +Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: + +**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` + +**Rationale** + +This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. + +This supports the following controls: + +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) + +**Audit** + +On the control plane hosts for the Rancher HA cluster run: + +``` bash +stat /opt/kubernetes/encryption.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. + +**Remediation** + +- Generate a key and an empty configuration file: + +``` bash +head -c 32 /dev/urandom | base64 -i - +touch /opt/kubernetes/encryption.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/encryption.yaml +chmod 0600 /opt/kubernetes/encryption.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: v1 +kind: EncryptionConfig +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `secret` is the 32-byte base64-encoded string generated in the first step. + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.3 - Install the audit log configuration on all control plane nodes. + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. + +**Rationale** + +The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. 
+ +This supports the following controls: + +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) + +**Audit** + +On each control plane node, run: + +``` bash +stat /opt/kubernetes/audit.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/audit.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/audit.yaml +chmod 0600 /opt/kubernetes/audit.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.4 - Place Kubernetes event limit configuration on each control plane host + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. + +**Rationale** + +Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
+ +This supports the following control: + +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Audit** + +On nodes with the `controlplane` role run: + +``` bash +stat /opt/kubernetes/admission.yaml +stat /opt/kubernetes/event.yaml +``` + +For each file, ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` + +For `admission.yaml` ensure that the file contains: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +For `event.yaml` ensure that the file contains: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/admission.yaml +touch /opt/kubernetes/event.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/admission.yaml +chown root:root /opt/kubernetes/event.yaml +chmod 0600 /opt/kubernetes/admission.yaml +chmod 0600 /opt/kubernetes/event.yaml +``` + +- For `admission.yaml` set the contents to: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +- For `event.yaml` set the contents to: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory has permissions of 700 or more restrictive. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. + +**Audit** + +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %a /var/lib/rancher/etcd +``` + +Verify that the permissions are `700` or more restrictive. + +**Remediation** + +Follow the steps as documented in [1.4.12]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. + +### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory ownership is set to `etcd:etcd`. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. 
+ +**Audit** + +On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %U:%G /var/lib/rancher/etcd +``` + +Verify that the ownership is set to `etcd:etcd`. + +**Remediation** + +- On the etcd server node(s) add the `etcd` user: + +``` bash +useradd etcd +``` + +Record the uid/gid: + +``` bash +id etcd +``` + +- Add the following to the RKE `cluster.yml` etcd section under `services`: + +``` yaml +services: + etcd: + uid: + gid: +``` + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix B. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. + +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + extra_args: + authorization-mode: "Webhook" + streaming-connection-idle-timeout: "" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. 
+ +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. + +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" +--encryption-provider-config=/opt/kubernetes/encryption.yaml +--admission-control-config-file=/opt/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=5 +--audit-log-maxbackup=5 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/opt/kubernetes/audit.yaml +--tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: 
"ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 2.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + … + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole psp:restricted +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding psp:restricted +``` + +- Verify the restricted PSP is present. 
+ +``` bash +kubectl get psp restricted +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. + +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. 
+ +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- While upgrading or installing Rancher 2.3.x, provide the following flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local administrator password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local administrator password should be changed from the default. + +**Rationale** + +The default administrator password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. + +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. + +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. 
+ +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. + +--- + +## Appendix A - Complete ubuntu `cloud-config` Example + +`cloud-config` file to automate hardening manual steps on nodes deployment. 
+ +``` +#cloud-config +bootcmd: +- apt-get update +- apt-get install -y apt-transport-https +apt: + sources: + docker: + source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" + keyid: 0EBFCD88 +packages: +- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] +- jq +write_files: +# 1.1.1 - Configure default sysctl settings on all hosts +- path: /etc/sysctl.d/90-kubelet.conf + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxkeys=1000000 + kernel.keys.root_maxbytes=25000000 +# 1.1.2 encription provider +- path: /opt/kubernetes/encryption.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: QRCexFindur3dzS0P/UmHs5xA6sKu58RbtWOQFarfh4= + - identity: {} +# 1.1.3 audit log +- path: /opt/kubernetes/audit.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + - level: Metadata +# 1.1.4 event limit +- path: /opt/kubernetes/admission.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: apiserver.k8s.io/v1alpha1 + kind: AdmissionConfiguration + plugins: + - name: EventRateLimit + path: /opt/kubernetes/event.yaml +- path: /opt/kubernetes/event.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000 +# 1.4.12 etcd user +groups: + - etcd +users: + - default + - name: etcd + gecos: Etcd user + primary_group: etcd + homedir: /var/lib/etcd +# 1.4.11 etcd data dir +runcmd: + - chmod 0700 /var/lib/etcd + - usermod -G docker -a ubuntu + - sysctl -p /etc/sysctl.d/90-kubelet.conf +``` + +## Appendix B - Complete RKE `cluster.yml` Example + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] + +services: + kubelet: + extra_args: + streaming-connection-idle-timeout: "1800s" + authorization-mode: "Webhook" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + 
audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + services: + etcd: + uid: 1001 + gid: 1001 +addons: | + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +## Appendix C - Complete RKE Template Example + +``` yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false 
+enable_cluster_monitoring: false +enable_network_policy: false +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + ignore_docker_version: true +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1001 + retention: 72h + snapshot: false + uid: 1001 + kube_api: + always_pull_images: false + extra_args: + admission-control-config-file: /opt/kubernetes/admission.yaml + anonymous-auth: 'false' + audit-log-format: json + audit-log-maxage: '5' + audit-log-maxbackup: '5' + audit-log-maxsize: '100' + audit-log-path: /var/log/kube-audit/audit-log.json + audit-policy-file: /opt/kubernetes/audit.yaml + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy + encryption-provider-config: /opt/kubernetes/encryption.yaml + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/var/log/kube-audit:/var/log/kube-audit' + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + anonymous-auth: 'false' + event-qps: '0' + feature-gates: RotateKubeletServerCertificate=true + make-iptables-util-chains: 'true' + protect-kernel-defaults: 'true' + streaming-connection-idle-timeout: 1800s + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` diff --git a/content/rancher/v2.x/en/security/security-scan/_index.md b/content/rancher/v2.x/en/security/security-scan/_index.md new file mode 100644 index 00000000000..f2ba6ebb3bc --- /dev/null +++ b/content/rancher/v2.x/en/security/security-scan/_index.md @@ -0,0 +1,66 @@ +--- +title: Security Scans +weight: 1 +--- + +_Available as of v2.4.0-alpha1_ + +Rancher can run a security scan to check whether 
Kubernetes is deployed according to security best practices as defined in the CIS (Center for Internet Security) Kubernetes Benchmark.
+
+The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. The Benchmark provides recommendations of two types: Scored and Not Scored. Rancher runs tests only for the Scored recommendations.
+
+When Rancher runs a CIS Security Scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped, and failed tests. The report also includes remediation steps for any failed tests.
+
+To check clusters for CIS Kubernetes Benchmark compliance, the security scan leverages [kube-bench,](https://github.com/aquasecurity/kube-bench) an open-source tool from Aqua Security.
+
+### About the Generated Report
+
+Each scan generates a report that can be viewed in the Rancher UI and can be downloaded in CSV format.
+
+To determine which version of the [Benchmark](https://www.cisecurity.org/benchmark/kubernetes/) to use in the scan, Rancher chooses a version that is appropriate for the cluster's Kubernetes version. The Benchmark version is included in the generated report.
+
+Each test in the report is identified by its corresponding Scored test in the Benchmark. For example, if a cluster fails test 1.3.6, you can look up the description and rationale for section 1.3.6 in the Benchmark itself, or in Rancher's [hardening guide for the Kubernetes version that the cluster is using.]({{}}/rancher/v2.x/en/security/#rancher-hardening-guide) Recommendations marked as Not Scored in the Benchmark are not included in the report.
+
+Similarly, for information on how to manually audit the test result, you can look up section 1.3.6 in Rancher's [self-assessment guide for the corresponding Kubernetes version.]({{}}/rancher/v2.x/en/security/#the-cis-benchmark-and-self-assessment)
+
+### Prerequisites
+
+To run security scans on a cluster and access the generated reports, you must be an [Administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [Cluster Owner.]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/)
+
+Rancher can only run security scans on clusters that were created with RKE, which includes custom clusters and clusters that Rancher created in an infrastructure provider such as Amazon EC2 or GCE. Imported clusters and clusters in hosted Kubernetes providers can't be scanned by Rancher.
+
+The security scan cannot run in a cluster that has Windows nodes.
+
+### Running a Scan
+
+1. From the cluster view in Rancher, click **Tools > CIS Scans.**
+1. Click **Run Scan.**
+
+**Result:** A report is generated and displayed in the **CIS Scans** page. To see details of the report, click the report's name.
+
+### Skipping a Test
+
+1. From the cluster view in Rancher, click **Tools > CIS Scans.**
+1. Click the name of the report that has tests you want to skip.
+1. A **Skip** button is displayed next to each failed test. Click **Skip** for each test that should be skipped.
+
+**Result:** The tests will be skipped on the next scan.
+
+To re-run the security scan, go to the top of the page and click **Run Scan.**
+
+### Un-skipping a Test
+
+1. From the cluster view in Rancher, click **Tools > CIS Scans.**
+1. Click the name of the report that has tests you want to un-skip.
+1. An **Unskip** button is displayed next to each skipped test. Click **Unskip** for each test that should not be skipped.
+ +**Result:** The tests will not be skipped on the next scan. + +To re-run the security scan, go to the top of the page and click **Run Scan.** + +### Deleting a Report + +1. From the cluster view in Rancher, click **Tools > CIS Scans.** +1. Go to the report that should be deleted. +1. Click the **Ellipsis (...) > Delete.** +1. Click **Delete.** \ No newline at end of file diff --git a/content/rancher/v2.x/en/system-tools/_index.md b/content/rancher/v2.x/en/system-tools/_index.md index db524751cd4..10a48611e45 100644 --- a/content/rancher/v2.x/en/system-tools/_index.md +++ b/content/rancher/v2.x/en/system-tools/_index.md @@ -3,12 +3,20 @@ title: System Tools weight: 6001 --- -System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [RKE cluster as used for Rancher HA]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/). The tasks include: +System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [RKE cluster as used for installing Rancher on Kubernetes]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). The tasks include: * Collect logging and system metrics from nodes. * Remove Kubernetes resources created by Rancher. -### Download System Tools +The following commands are available: + +| Command | Description +|---|--- +| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. +| [stats](#stats) | Stream system metrics from nodes. +| [remove](#remove) | Remove Kubernetes resources created by Rancher. + +# Download System Tools You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. @@ -31,29 +39,19 @@ After you download the tools, complete the following actions: chmod +x system-tools ``` -### Using System Tools +# Logs -The following subcommands are available: - -| Command | Description -|---|--- -| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. -| [stats](#stats) | Stream system metrics from nodes. -| [remove](#remove) | Remove Kubernetes resources created by Rancher. - -### Logs - -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [RKE cluster as used for Rancher HA]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/). See [Troubleshooting]({{< baseurl >}}//rancher/v2.x/en/troubleshooting/) for a list of core Kubernetes cluster components. +The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). See [Troubleshooting]({{< baseurl >}}//rancher/v2.x/en/troubleshooting/) for a list of core Kubernetes cluster components. System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). 
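After the `logs` subcommand finishes (see the Usage section below), the resulting archive can be unpacked and browsed locally. A minimal sketch, assuming the default `cluster-logs.tar` output name:

```
# Extract the collected logs into a working directory and list the contents
mkdir -p cluster-logs
tar -xf cluster-logs.tar -C cluster-logs
ls -R cluster-logs
```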
If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. -#### Usage +### Usage ``` ./system-tools_darwin-amd64 logs --kubeconfig ``` -#### Options +The following are the options for the logs command: | Option | Description | ------------------------------------------------------ | ------------------------------------------------------ @@ -61,19 +59,19 @@ System Tools will use the provided kubeconfig file to deploy a DaemonSet, that w | `--output , -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the options defaults to `cluster-logs.tar`. | `--node , -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. -### Stats +# Stats -The stats subcommand will display system metrics from nodes in [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters or [RKE cluster as used for Rancher HA]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/). +The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) or nodes in an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.x/en/installation/k8s-install/kubernetes-rke/). System Tools will deploy a DaemonSet, and run a predefined command based on `sar` (System Activity Report) to show system metrics. -#### Usage +### Usage ``` ./system-tools_darwin-amd64 stats --kubeconfig ``` -#### Options +The following are the options for the stats command: | Option | Description | ------------------------------------------------------ | ------------------------------ @@ -81,12 +79,12 @@ System Tools will deploy a DaemonSet, and run a predefined command based on `sar | `--node , -n node1` | Specify the nodes to display the system metrics from. If no node is specified, logs from all nodes in the cluster will be displayed. | `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the options defaults to `/usr/bin/sar -u -r -F 1 1`. -### Remove - -When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the remove subcommand to remove the Kubernetes resources. When you use the remove subcommand, the following resources will be removed: +# Remove >**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{< baseurl >}}/rancher/v2.x/en/backups/backups) before executing the command. +When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: + - The Rancher deployment namespace (`cattle-system` by default). - Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates as of v2.1.0. - Labels, annotations, and finalizers. 
@@ -99,7 +97,7 @@ When you install Rancher on a Kubernetes cluster, it will create Kubernetes reso > >These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself. -#### Usage +### Usage When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. @@ -109,7 +107,7 @@ When you run the command below, all the resources listed [above](#remove) will b ./system-tools remove --kubeconfig --namespace ``` -#### Options +The following are the options for the `remove` command: | Option | Description | ---------------------------------------------- | ------------ diff --git a/content/rancher/v2.x/en/troubleshooting/_index.md b/content/rancher/v2.x/en/troubleshooting/_index.md index 5177b1eff07..7f6b30c3891 100644 --- a/content/rancher/v2.x/en/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/_index.md @@ -28,9 +28,9 @@ This section contains information to help you troubleshoot issues when using Ran When you experience name resolution issues in your cluster. -- [Rancher HA]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/rancherha/) +- [Troubleshooting Rancher installed on Kubernetes]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/rancherha/) - If you experience issues with your [High Availability (HA) Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) + If you experience issues with your [Rancher server installed on Kubernetes]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/) - [Imported clusters]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/imported-clusters/) diff --git a/content/rancher/v2.x/en/troubleshooting/dns/_index.md b/content/rancher/v2.x/en/troubleshooting/dns/_index.md index 431942c9a44..f64f6e5729b 100644 --- a/content/rancher/v2.x/en/troubleshooting/dns/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/dns/_index.md @@ -7,7 +7,7 @@ The commands/steps listed on this page can be used to check name resolution issu Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. -Before running the DNS checks, make sure that [the overlay network is functioning correctly]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. +Before running the DNS checks, check the [default DNS provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. ### Check if DNS pods are running @@ -15,7 +15,13 @@ Before running the DNS checks, make sure that [the overlay network is functionin kubectl -n kube-system get pods -l k8s-app=kube-dns ``` -Example output: +Example output when using CoreDNS: +``` +NAME READY STATUS RESTARTS AGE +coredns-799dffd9c4-6jhlz 1/1 Running 0 76m +``` + +Example output when using kube-dns: ``` NAME READY STATUS RESTARTS AGE kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s @@ -123,9 +129,45 @@ command terminated with exit code 1 Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. 
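Regardless of which DNS provider the cluster uses, it can also help to confirm that the `kube-dns` Service exists and that its cluster IP matches the DNS server configured for the pods (`10.43.0.10` with the default RKE service CIDR). A quick check, assuming the default Service name and namespace:

```
# The Service is named kube-dns for both the CoreDNS and kube-dns providers
kubectl -n kube-system get svc kube-dns

# Compare the CLUSTER-IP above with the nameserver a pod actually uses
kubectl run -i --restart=Never --rm dnscheck-${RANDOM} --image=busybox:1.28 -- cat /etc/resolv.conf
```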
-### Check upstream nameservers in kubedns container +### CoreDNS specific -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for `kube-dns`. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). +#### Check CoreDNS logging + +``` +kubectl -n kube-system logs -l k8s-app=kube-dns +``` + +#### Check configuration + +CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. + +``` +kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} +``` + +#### Check upstream nameservers in resolv.conf + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. + +``` +kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' +``` + +#### Enable query logging + +Query logging can be enabled by adding the [log plugin](https://coredns.io/plugins/log/) to the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or by using the command below to replace the configuration in place: + +``` +kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - +``` + +All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). + +### kube-dns specific + +#### Check upstream nameservers in kubedns container + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`).
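To verify this on the host itself, you can check whether `systemd-resolved` is active and inspect the resolv.conf file it maintains. A quick check, assuming a systemd-based host such as Ubuntu 18.04:

```
# Check whether systemd-resolved is running on the node
systemctl is-active systemd-resolved

# Show the real upstream nameservers that systemd-resolved has configured
cat /run/systemd/resolve/resolv.conf
```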
Use the following command to check the upstream nameservers used by the kubedns container: diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md index e224efc44ce..0c73699ee9f 100644 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md @@ -1,470 +1,18 @@ --- -title: Kubernetes components +title: Kubernetes Components weight: 100 --- -The commands/steps listed on this page apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. +The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. -## Diagram +This section includes troubleshooting tips in the following categories: + +- [Troubleshooting etcd Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd) +- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane) +- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy) +- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic) + +# Kubernetes Component Diagram ![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -## etcd - -This section applies to nodes with the `etcd` role. - -### Is etcd container is running - -The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name=etcd$ -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd -``` - -### etcd container logging - -The logging of the container can contain information on what the problem could be. - -``` -docker logs etcd -``` - -* `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` - -A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. - -* `xxx is starting a new election at term x` - -The etcd cluster has lost it's quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. - -* `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` - -The host firewall is preventing network communication. - -* `rafthttp: request cluster ID mismatch` - -The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. - -* `rafthttp: failed to find member` - -The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. - -### etcd cluster and connectivity checks - -The address where etcd is listening depends on the address configuration of the host etcd is running on. If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster) - -* Check etcd members on all nodes - -Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes. - -Command when no internal address is configured on the host: -``` -docker exec etcd etcdctl member list -``` - -Command when internal address is configured on the host: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list" -``` - -Example output: -``` -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -``` - -* Check endpoint status - -The values for `RAFT TERM` should be equal and `RAFT INDEX` should be not be too far apart from each other. 
- -Command when no internal address is configured on the host: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Command when internal address is configured on the host: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | 333ef673fc4add56 | 3.2.18 | 24 MB | false | 72 | 66887 | -| https://IP:2379 | 5feed52d940ce4cf | 3.2.18 | 24 MB | true | 72 | 66887 | -| https://IP:2379 | db6b3bdb559a848d | 3.2.18 | 25 MB | false | 72 | 66887 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -* Check endpoint health - -Command when no internal address is configured on the host: -``` -docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Command when internal address is configured on the host: -``` -docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Example output: -``` -https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms -``` - -* Check connectivity on port TCP/2379 - -Command when no internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - curl -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health"; -done -``` - -Command when internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - curl -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health"; -done -``` - -If you are running on an operating system without `curl` (for example, RancherOS), you can use the following command which uses a Docker container to run the `curl` command. 
- -Command when no internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - docker run --net=host -v /opt/rke/etc/kubernetes/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Command when internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - docker run --net=host -v /opt/rke/etc/kubernetes/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Example output: -``` -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -``` - -* Check connectivity on port TCP/2380 - -Command when no internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - curl -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version"; -done -``` - -Command when internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - curl -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version"; -done -``` - -If you are running on an operating system without `curl` (for example, RancherOS), you can use the following command which uses a Docker container to run the `curl` command. 
- -Command when no internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v /opt/rke/etc/kubernetes/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Command when internal address is configured on the host: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v /opt/rke/etc/kubernetes/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Example output: -``` -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -``` - -### etcd alarms - -etcd will trigger alarms, for instance when it runs out of space. - -Command when no internal address is configured on the host: -``` -docker exec etcd etcdctl alarm list -``` - -Command when internal address is configured on the host: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output when NOSPACE alarm is triggered: -``` -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -``` - -### etcd space errors - -Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. 
- -Resolution: - -* Compact the keyspace - -Command when no internal address is configured on the host: -``` -rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') -docker exec etcd etcdctl compact "$rev" -``` - -Command when internal address is configured on the host: -``` -rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" -``` - -Example output: -``` -compacted revision xxx -``` - -* Defrag all etcd members - -Command when no internal address is configured on the host: -``` -docker exec etcd etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Command when internal address is configured on the host: -``` -docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")" -``` - -Example output: -``` -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -``` - -* Check endpoint status - -Command when no internal address is configured on the host: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Command when internal address is configured on the host: -``` -docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -### Log level - -The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. - -Command when no internal address is configured on the host: -``` -curl -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) https://localhost:2379/config/local/log -``` - -Command when internal address is configured on the host: -``` -curl -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv $ETCDCTL_ENDPOINT)/config/local/log -``` - -To reset the log level back to the default (`INFO`), you can use the following command. 
- -Command when no internal address is configured on the host: -``` -curl -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) https://localhost:2379/config/local/log -``` - -Command when internal address is configured on the host: -``` -curl -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv $ETCDCTL_ENDPOINT)/config/local/log -``` - -## controlplane - -This section applies to nodes with the `controlplane` role. - -### Are the containers for controlplane running - -There are three specific containers launched on nodes with the `controlpane` role: - -* `kube-apiserver` -* `kube-controller-manager` -* `kube-scheduler` - -The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver -f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler -bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager -``` - -### controlplane container logging - -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kube-apiserver -docker logs kube-controller-manager -docker logs kube-scheduler -``` - -## nginx-proxy - -The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. - -### Is the container running - -The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name=nginx-proxy -``` - -Example output: - -``` -docker ps -a -f=name=nginx-proxy -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy -``` - -### Check generated NGINX configuration - -The generated configuration should include the IP addresses of the nodes with the `controlplane` role. 
The configuration can be checked using the following command: - -``` -docker exec nginx-proxy cat /etc/nginx/nginx.conf -``` - -Example output: -``` -error_log stderr notice; - -worker_processes auto; -events { - multi_accept on; - use epoll; - worker_connections 1024; -} - -stream { - upstream kube_apiserver { - - server ip_of_controlplane_node1:6443; - - server ip_of_controlplane_node2:6443; - - } - - server { - listen 6443; - proxy_pass kube_apiserver; - proxy_timeout 30; - proxy_connect_timeout 2s; - - } - -} -``` - -### nginx-proxy container logging - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs nginx-proxy -``` - -## worker and generic - -This section applies to every node as it includes components that run on nodes with any role. - -### Are the containers running - -There are three specific containers launched on nodes with the `controlpane` role: - -* kubelet -* kube-proxy - -The containers should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name='kubelet|kube-proxy' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-proxy -a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet -``` - -### container logging - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kubelet -docker logs kube-proxy -``` +Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md new file mode 100644 index 00000000000..a94b1a04ee7 --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md @@ -0,0 +1,40 @@ +--- +title: Troubleshooting Controlplane Nodes +weight: 2 +--- + +This section applies to nodes with the `controlplane` role. + +# Check if the Controlplane Containers are Running + +There are three specific containers launched on nodes with the `controlplane` role: + +* `kube-apiserver` +* `kube-controller-manager` +* `kube-scheduler` + +The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver +f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler +bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager +``` + +# Controlplane Container Logging + +> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. 
+ +The logging of the containers can contain information on what the problem could be. + +``` +docker logs kube-apiserver +docker logs kube-controller-manager +docker logs kube-scheduler +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md new file mode 100644 index 00000000000..6cdb9fefdaa --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md @@ -0,0 +1,365 @@ +--- +title: Troubleshooting etcd Nodes +weight: 1 +--- + +This section contains commands and tips for troubleshooting nodes with the `etcd` role. + +This page covers the following topics: + +- [Checking if the etcd Container is Running](#checking-if-the-etcd-container-is-running) +- [etcd Container Logging](#etcd-container-logging) +- [etcd Cluster and Connectivity Checks](#etcd-cluster-and-connectivity-checks) + - [Check etcd Members on all Nodes](#check-etcd-members-on-all-nodes) + - [Check Endpoint Status](#check-endpoint-status) + - [Check Endpoint Health](#check-endpoint-health) + - [Check Connectivity on Port TCP/2379](#check-connectivity-on-port-tcp-2379) + - [Check Connectivity on Port TCP/2380](#check-connectivity-on-port-tcp-2380) +- [etcd Alarms](#etcd-alarms) +- [etcd Space Errors](#etcd-space-errors) +- [Log Level](#log-level) +- [etcd Content](#etcd-content) + - [Watch Streaming Events](#watch-streaming-events) + - [Query etcd Directly](#query-etcd-directly) +- [Replacing Unhealthy etcd Nodes](#replacing-unhealthy-etcd-nodes) + +# Checking if the etcd Container is Running + +The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name=etcd$ +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd +``` + +# etcd Container Logging + +The logging of the container can contain information on what the problem could be. + +``` +docker logs etcd +``` +| Log | Explanation | +|-----|------------------| +| `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` | A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. | +| `xxx is starting a new election at term x` | The etcd cluster has lost its quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. | +| `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` | The host firewall is preventing network communication. | +| `rafthttp: request cluster ID mismatch` | The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. | +| `rafthttp: failed to find member` | The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. + +# etcd Cluster and Connectivity Checks + +The address where etcd is listening depends on the address configuration of the host etcd is running on. 
If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster) + +### Check etcd Members on all Nodes + +Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes. + +Command: +``` +docker exec etcd etcdctl member list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list" +``` + +Example output: +``` +xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 +xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 +xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 +``` + +### Check Endpoint Status + +The values for `RAFT TERM` should be equal and `RAFT INDEX` should be not be too far apart from each other. + +Command: +``` +docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table +``` + +Example output: +``` ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| https://IP:2379 | 333ef673fc4add56 | 3.2.18 | 24 MB | false | 72 | 66887 | +| https://IP:2379 | 5feed52d940ce4cf | 3.2.18 | 24 MB | true | 72 | 66887 | +| https://IP:2379 | db6b3bdb559a848d | 3.2.18 | 25 MB | false | 72 | 66887 | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +``` + +### Check Endpoint Health + +Command: +``` +docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") +``` + +Example output: +``` +https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms +https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms +https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms +``` + +### Check Connectivity on Port TCP/2379 + +Command: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do + echo "Validating connection to ${endpoint}/health" + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" 
--cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" +done +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do + echo "Validating connection to ${endpoint}/health"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" +done +``` + +Example output: +``` +Validating connection to https://IP:2379/health +{"health": "true"} +Validating connection to https://IP:2379/health +{"health": "true"} +Validating connection to https://IP:2379/health +{"health": "true"} +``` + +### Check Connectivity on Port TCP/2380 + +Command: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do + echo "Validating connection to ${endpoint}/version"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" +done +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do + echo "Validating connection to ${endpoint}/version"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" +done +``` + +Example output: +``` +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +``` + +# etcd Alarms + +etcd will trigger alarms, for instance when it runs out of space. + +Command: +``` +docker exec etcd etcdctl alarm list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +``` + +Example output when NOSPACE alarm is triggered: +``` +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +``` + +# etcd Space Errors + +Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. 
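Before applying the resolutions below, it can be useful to confirm how much space the etcd data actually occupies on disk. A quick check, assuming the default RKE data directory `/var/lib/etcd` on the host:

```
# Run on the etcd node: show the on-disk size of the etcd data directory
sudo du -sh /var/lib/etcd
```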
+ +Resolutions: + +- [Compact the Keyspace](#compact-the-keyspace) +- [Defrag All etcd Members](#defrag-all-etcd-members) +- [Check Endpoint Status](#check-endpoint-status) +- [Disarm Alarm](#disarm-alarm) + +### Compact the Keyspace + +Command: +``` +rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') +docker exec etcd etcdctl compact "$rev" +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" +``` + +Example output: +``` +compacted revision xxx +``` + +### Defrag All etcd Members + +Command: +``` +docker exec etcd etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")" +``` + +Example output: +``` +Finished defragmenting etcd member[https://IP:2379] +Finished defragmenting etcd member[https://IP:2379] +Finished defragmenting etcd member[https://IP:2379] +``` + +### Check Endpoint Status + +Command: +``` +docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" +``` + +Example output: +``` ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | +| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | +| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +``` + +### Disarm Alarm + +After verifying that the DB size went down after compaction and defragmenting, the alarm needs to be disarmed for etcd to allow writes again. 
+ +Command: +``` +docker exec etcd etcdctl alarm list +docker exec etcd etcdctl alarm disarm +docker exec etcd etcdctl alarm list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm disarm" +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +``` + +Example output: +``` +docker exec etcd etcdctl alarm list +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +docker exec etcd etcdctl alarm disarm +docker exec etcd etcdctl alarm list +``` + +# Log Level + +The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. + +Command: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log +``` + +To reset the log level back to the default (`INFO`), you can use the following command. + +Command: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log +``` + +# etcd Content + +If you want to investigate the contents of your etcd, you can either watch streaming events or you can query etcd directly, see below for examples. 
+ +### Watch Streaming Events + +Command: +``` +docker exec etcd etcdctl watch --prefix /registry +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT watch --prefix /registry +``` + +If you only want to see the affected keys (and not the binary data), you can append `| grep -a ^/registry` to the command to filter for keys only. + +### Query etcd Directly + +Command: +``` +docker exec etcd etcdctl get /registry --prefix=true --keys-only +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT get /registry --prefix=true --keys-only +``` + +You can process the data to get a summary of count per key, using the command below: + +``` +docker exec etcd etcdctl get /registry --prefix=true --keys-only | grep -v ^$ | awk -F'/' '{ if ($3 ~ /cattle.io/) {h[$3"/"$4]++} else { h[$3]++ }} END { for(k in h) print h[k], k }' | sort -nr +``` + +# Replacing Unhealthy etcd Nodes + +When a node in your etcd cluster becomes unhealthy, the recommended approach is to fix or remove the failed or unhealthy node before adding a new etcd node to the cluster. diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md new file mode 100644 index 00000000000..70505e96280 --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md @@ -0,0 +1,69 @@ +--- +title: Troubleshooting nginx-proxy +weight: 3 +--- + +The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. + +# Check if the Container is Running + +The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. + +``` +docker ps -a -f=name=nginx-proxy +``` + +Example output: + +``` +docker ps -a -f=name=nginx-proxy +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy +``` + +# Check Generated NGINX Configuration + +The generated configuration should include the IP addresses of the nodes with the `controlplane` role. The configuration can be checked using the following command: + +``` +docker exec nginx-proxy cat /etc/nginx/nginx.conf +``` + +Example output: +``` +error_log stderr notice; + +worker_processes auto; +events { + multi_accept on; + use epoll; + worker_connections 1024; +} + +stream { + upstream kube_apiserver { + + server ip_of_controlplane_node1:6443; + + server ip_of_controlplane_node2:6443; + + } + + server { + listen 6443; + proxy_pass kube_apiserver; + proxy_timeout 30; + proxy_connect_timeout 2s; + + } + +} +``` + +# nginx-proxy Container Logging + +The logging of the containers can contain information on what the problem could be. 
+ +``` +docker logs nginx-proxy +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md new file mode 100644 index 00000000000..d102694fe45 --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md @@ -0,0 +1,35 @@ +--- +title: Troubleshooting Worker Nodes and Generic Components +weight: 4 +--- + +This section applies to every node as it includes components that run on nodes with any role. + +# Check if the Containers are Running + +There are two specific containers launched on every node: + +* kubelet +* kube-proxy + +The containers should have status `Up`. The duration shown after `Up` is the time the container has been running. + +``` +docker ps -a -f=name='kubelet|kube-proxy' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-proxy +a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet +``` + +# Container Logging + +The logging of the containers can contain information on what the problem could be. + +``` +docker logs kubelet +docker logs kube-proxy +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md index 06d63135786..c8eae70b743 100644 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-resources/_index.md @@ -112,6 +112,22 @@ kubectl -n ingress-nginx logs -l app=ingress-nginx kubectl -n ingress-nginx get events ``` +#### Debug logging + +To enable debug logging: + +``` +kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' +``` + +#### Check configuration + +Retrieve generated configuration in each pod: + +``` +kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done +``` + +### Rancher agents Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. diff --git a/content/rancher/v2.x/en/troubleshooting/networking/_index.md b/content/rancher/v2.x/en/troubleshooting/networking/_index.md index 75173ccd167..d76fbf67773 100644 --- a/content/rancher/v2.x/en/troubleshooting/networking/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/networking/_index.md @@ -9,13 +9,13 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG ### Double check if all the required ports are opened in your (host) firewall -Double check if all the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. +Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) are opened in your (host) firewall.
The overlay network uses UDP in comparison to all other required ports which are TCP. ### Check if overlay network is functioning correctly The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. -To test the overlay network, you can launch the following `DaemonSet` definition. This will run an `alpine` container on every host, which we will use to run a `ping` test between containers on all hosts. +To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `busybox` container on every host, which we will use to run a `ping` test between containers on all hosts. 1. Save the following file as `ds-overlaytest.yml` @@ -38,14 +38,14 @@ To test the overlay network, you can launch the following `DaemonSet` definition containers: - image: busybox:1.28 imagePullPolicy: Always - name: alpine + name: busybox command: ["sh", "-c", "tail -f /dev/null"] terminationMessagePath: /dev/termination-log ``` 2. Launch it using `kubectl create -f ds-overlaytest.yml` 3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. -4. Run the following command to let each container on every host ping each other (it's a single line command). +4. Run the following command, from the same location, to let each container on every host ping each other (it's a single line bash command). ``` echo "=> Start network overlay test"; kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End network overlay test" @@ -58,7 +58,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition => End network overlay test ``` -If you see error in the output, that means that the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for overlay networking are not opened between the hosts indicated. +If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for overlay networking are not opened between the hosts indicated. Example error output of a situation where NODE1 had the UDP ports blocked. @@ -75,7 +75,7 @@ NODE1 cannot reach NODE3 => End network overlay test ``` -Cleanup the alpine DaemonSet by running `kubectl delete ds/overlaytest`. +Cleanup the busybox DaemonSet by running `kubectl delete ds/overlaytest`. 
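If the test above reported failures, you can repeat a single check by hand before removing the DaemonSet, which helps confirm between which two hosts the overlay network is broken. A minimal sketch, assuming the `overlaytest` DaemonSet is still running; the pod name and pod IP below are hypothetical values taken from the `-o wide` output:

```
# List the overlaytest pods together with the node they run on and their pod IP
kubectl get pods -l name=overlaytest -o wide

# From the pod running on the first host, ping the pod IP reported for the second host
kubectl exec overlaytest-abcde -- ping -c 2 10.42.1.7
```

If only the pairs involving one particular host fail, the UDP ports for overlay networking are most likely blocked on or towards that host.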
### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices diff --git a/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md b/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md index 5a90616e985..a30b664c9e2 100644 --- a/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/rancherha/_index.md @@ -3,7 +3,7 @@ title: Rancher HA weight: 104 --- -The commands/steps listed on this page can be used to check your Rancher HA installation. +The commands/steps listed on this page can be used to check your Rancher Kubernetes Installation. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml`). diff --git a/content/rancher/v2.x/en/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/_index.md index 2c4cf4c0b4e..5fdcdc3dc16 100644 --- a/content/rancher/v2.x/en/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/_index.md @@ -13,8 +13,8 @@ aliases: In the event that your Rancher Server does not upgrade successfully, you can rollback to your installation prior to upgrade: -- [Single-Node Rollbacks]({{< baseurl >}}/rancher/v2.x/en/upgrades/single-node-rollbacks) -- [High-Availability Rollbacks]({{< baseurl >}}/rancher/v2.x/en/upgrades/ha-server-rollbacks) +- [Rollbacks for Rancher installed with Docker]({{}}/rancher/v2.x/en/upgrades/single-node-rollbacks) +- [Rollbacks for Rancher installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/upgrades/ha-server-rollbacks) > **Note:** If you are rolling back to versions in either of these scenarios, you must follow some extra [instructions]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/) in order to get your clusters working. > diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/_index.md index 19192f8e093..245af441455 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/_index.md @@ -4,8 +4,8 @@ weight: 1010 --- This section contains information about how to rollback your Rancher server to a previous version. -- [Rolling back a Single Node Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/) -- [Rolling back a High Availability Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) +- [Rolling back Rancher installed with Docker]({{}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/) +- [Rolling back Rancher installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) ### Special Scenarios regarding Rollbacks @@ -20,12 +20,12 @@ Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.o 1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. 
- **Single Node Rancher Install** + **Rancher Installed with Docker** ``` docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json ``` - **HA Rancher Install** + **Rancher Installed on a Kubernetes Cluster** ``` kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json ``` @@ -38,7 +38,7 @@ Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.o 5. Apply the backed up tokens based on how you installed Rancher. - **Single Node Rancher Install** + **Rancher Installed with Docker** Save the following script as `apply_tokens.sh` to the machine where the Rancher docker container is running. Also copy the `tokens.json` file created previously to the same directory as the script. ``` @@ -58,7 +58,7 @@ Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.o ``` After a few moments the clusters will go from Unavailable back to Available. - **HA Rancher Install** + **Rancher Installed on a Kubernetes Cluster** Save the following script as `apply_tokens.sh` to a machine with kubectl access to the Rancher management plane. Also copy the `tokens.json` file created previously to the same directory as the script. ``` diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md index 04e1b27935d..3288777bd26 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md @@ -1,14 +1,13 @@ --- -title: High Availability (HA) Rollback +title: Kubernetes Rollback weight: 1025 aliases: - - /rancher/v2.x/en/backups/rollbacks/ha-server-rollbacks/ - /rancher/v2.x/en/upgrades/ha-server-rollbacks --- If you upgrade Rancher and the upgrade does not complete successfully, you may need to rollback your Rancher Server to its last healthy state. -To restore Rancher follow the procedure detailed here: [Restoring Backups — High Availability Installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/ha-restoration) +To restore Rancher follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{< baseurl >}}/rancher/v2.x/en/backups/restorations/ha-restoration) Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md index f0304c2eb44..0a041e08ae8 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/_index.md @@ -1,12 +1,12 @@ --- -title: Single Node Rollback +title: Docker Rollback weight: 1015 aliases: - /rancher/v2.x/en/backups/rollbacks/single-node-rollbacks - /rancher/v2.x/en/upgrades/single-node-rollbacks --- -If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Single Node Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade). 
Rolling back restores: +If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade). Rolling back restores: - Your previous version of Rancher. - Your data backup created before upgrade. @@ -38,7 +38,7 @@ You can obtain `` and `` by loggi ## Rolling Back Rancher -If you have issues upgrading Rancher, roll it back to its lastest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. +If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. >**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. @@ -51,7 +51,7 @@ If you have issues upgrading Rancher, roll it back to its lastest known healthy ``` docker pull rancher/rancher: ``` - + 1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. ``` @@ -59,9 +59,9 @@ If you have issues upgrading Rancher, roll it back to its lastest known healthy ``` You can obtain the name for your Rancher container by entering `docker ps`. -1. Move the backup tarball that you created during completion of [Single Node Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. +1. Move the backup tarball that you created during completion of [Docker Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - If you followed the naming convention we suggested in [Single Node Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). + If you followed the naming convention we suggested in [Docker Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). 1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the [placeholder](#before-you-start). Don't forget to close the quotes. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/_index.md index 109c96c1bfa..d83b0af6f5a 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/_index.md @@ -2,21 +2,29 @@ title: Upgrades weight: 1005 --- -This section contains information about how to upgrade your Rancher server to a newer version. +This section contains information about how to upgrade your Rancher server to a newer version. Regardless if you installed in an air gap environment or not, the upgrade steps mainly depend on whether you have a single node or high-availability installation of Rancher. 
Select from the following options: -### Single Node Install +- [Upgrading Rancher installed with Docker]({{}}/rancher/v2.x/en/upgrades/upgrades/single-node/) +- [Upgrading Rancher installed on a Kubernetes cluster]({{}}/rancher/v2.x/en/upgrades/upgrades/ha/) -- [Upgrading a Single Node Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/) -- [Upgrading an Air Gapped Single Node Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node-air-gap-upgrade/) +### Known Upgrade Issues -### Upgrading to an HA Helm Chart +The following table lists some of the most noteworthy issues to be considered when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) -- [Upgrade an HA Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/) -- [Upgrade a Air Gap HA Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/) -- [Migrating from an RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) +Upgrade Scenario | Issue +---|--- +Upgrading to v2.3.0+ | Any user provisioned cluster will be automatically updated upon any edit as tolerations were added to the images used for Kubernetes provisioning. +Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{< baseurl >}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0). +Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. +Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). -> ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +### Caveats +Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories/) aren't supported. 
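If you are not sure which chart repository your current installation came from, you can list the configured Helm repositories and the installed release. A minimal sketch, assuming a Helm-based install with the release deployed to the `cattle-system` namespace (omit the `-n` flag when using Helm 2):

```
# Show which Rancher chart repositories (for example rancher-stable, rancher-latest or rancher-alpha) are configured
helm repo list

# Show the installed release and the chart version it was deployed from
helm ls -n cattle-system
```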
+ +### RKE Add-on Installs + +**Important: RKE add-on install is only supported up to Rancher v2.0.8** + +Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/#installation-outline). + +If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md deleted file mode 100644 index 9681ef22fed..00000000000 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: High Availability (HA) Upgrade - Air Gap -weight: 1021 ---- - -The following instructions will guide you through upgrading a high-availability Rancher Server installed in an air gap environment. - -## Prerequisites - -- **Populate Images** - - Follow the guide to [Prepare the Private Registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/) with the images for the upgrade Rancher release. - -- **Backup your Rancher Cluster** - - [Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) - of your Rancher Server cluster. You'll use the snapshot as a restoration point if something goes wrong during upgrade. - -- **kubectl** - - Follow the kubectl [configuration instructions]({{< baseurl >}}/rancher/v2.x/en/faq/kubectl) and confirm that you can connect to the Kubernetes cluster running Rancher server. - -- **helm** - - [Install or update](https://docs.helm.sh/using_helm/#installing-helm) Helm to the latest version. - -- **Upgrades to v2.0.7+ only: check system namespace locations**
- Starting in v2.0.7, Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). - -- **Upgrades to v2.2.0 only: mirror system-charts repository and configure Rancher**
- Starting in v2.2.0, Rancher introduced the [System Charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-system-charts/). - -## Caveats -Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#helm-chart-repositories/) aren't supported. - -## Upgrade Rancher - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - - -2. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://kubernetes-charts.storage.googleapis.com - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -3. Fetch the latest chart to install Rancher from the Helm chart repository. - - This command will pull down the latest chart and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - -3. Render the upgrade template. - - Use the same `--set` values that you used for the install. Remember to set the `--is-upgrade` flag for `helm`. This will create a `rancher` directory with the Kubernetes manifest files. - - ```plain - helm template ./rancher-.tgz --output-dir . --is-upgrade \ - --name rancher --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher - ``` - -4. Copy and apply the rendered manifests. - - Copy the files to a server with access to the Rancher server cluster and apply the rendered templates. - - ```plain - kubectl -n cattle-system apply -R -f ./rancher - ``` - -**Result:** Rancher is upgraded. Log back into Rancher to confirm that the upgrade succeeded. - ->**Having Network Issues Following Upgrade?** -> -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back - -Should something go wrong, follow the [HA Rollback]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. 
diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md deleted file mode 100644 index 471a172acf8..00000000000 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: High Availability (HA) Upgrade -weight: 1020 ---- - -The following instructions will guide you through upgrading a high-availability Rancher Server that was [installed using Helm package manager]({{< baseurl >}}/rancher/v2.x/en/installation/ha/). - ->**Note:** If you installed Rancher using the RKE Add-on yaml, see the following documents to migrate or upgrade. -> ->* [Migrating from RKE Add-On Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on) -> -> As of release v2.0.8, Rancher supports installation and upgrade by Helm chart, although RKE installs/upgrades are still supported as well. If you want to change upgrade method from RKE Add-on to Helm chart, follow this procedure. - - -## Prerequisites - -- **Backup your Rancher cluster** - - [Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) - of your Rancher Server cluster. You'll use the snapshot as a restoration point if something goes wrong during upgrade. - -- **kubectl** - - Follow the kubectl [configuration instructions]({{< baseurl >}}/rancher/v2.x/en/faq/kubectl) and confirm that you can connect to the Kubernetes cluster running Rancher server. - -- **Helm** - - [Install or update](https://docs.helm.sh/using_helm/#installing-helm) Helm to the latest version. - -- **Tiller** - - Update the helm agent, Tiller, on your cluster. - - ``` - helm init --upgrade --service-account tiller - ``` -- **Upgrades to v2.0.7+ only: check system namespace locations** - Starting in v2.0.7, Rancher introduced the `System` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). - -## Caveats -Upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#helm-chart-repositories/) aren't supported. - -## Upgrade Rancher - -> **Note:** For Air Gap installs see [Upgrading HA Rancher - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#upgrading-rancher) - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -2. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://kubernetes-charts.storage.googleapis.com - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#switching-to-a-different-helm-chart-repository). 
If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - -3. Get the set values from the current Rancher install. - - ``` - helm get values rancher - - hostname: rancher.my.org - ``` - - > **Note:** There may be more values that are listed with this command depending on which [SSL configuration option you selected]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/#choose-your-ssl-configuration) when installing Rancher. - -4. Upgrade Rancher to the latest version based on values from the previous steps. - - - Take all the values from the previous step and append them to the command using `--set key=value`. - - ``` - helm upgrade rancher rancher-/rancher --set hostname=rancher.my.org - ``` - -**Result:** Rancher is upgraded. Log back into Rancher to confirm that the upgrade succeeded. - ->**Having Network Issues Following Upgrade?** -> -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back - -Should something go wrong, follow the [HA Rollback]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md new file mode 100644 index 00000000000..b2ff236b0d1 --- /dev/null +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md @@ -0,0 +1,197 @@ +--- +title: Upgrading Rancher Installed on Kubernetes +weight: 1020 +aliases: + - /rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap + - /rancher/v2.x/en/upgrades/air-gap-upgrade/ +--- + +The following instructions will guide you through using Helm to upgrade a Rancher server that was installed on a Kubernetes cluster. + +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. + +If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). + +>**Notes:** +> +> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) +> - The upgrade instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/upgrades/upgrades/ha/helm2) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. 
+> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.x/en/installation/options/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) + +# Prerequisites + +- **Review the [known upgrade issues]({{}}/rancher/v2.x/en/upgrades/upgrades/#known-upgrade-issues) and [caveats]({{}}/rancher/v2.x/en/upgrades/upgrades/#caveats)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) +- **For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. + +# Upgrade Outline + +Follow the steps to upgrade Rancher server: + +- [A. Back up your Kubernetes cluster that is running Rancher server](#a-backup-your-kubernetes-cluster-that-is-running-rancher-server) +- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) +- [C. Upgrade Rancher](#c-upgrade-rancher) +- [D. Verify the Upgrade](#d-verify-the-upgrade) + +### A. Back up Your Kubernetes Cluster that is Running Rancher Server + +[Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) +of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restoration point if something goes wrong during upgrade. + +### B. Update the Helm chart repository + +1. Update your local helm repo cache. + + ``` + helm repo update + ``` + +1. Get the repository name that you used to install Rancher. + + For information about the repos and their differences, see [Helm Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + + {{< release-channel >}} + + ``` + helm repo list + + NAME URL + stable https://kubernetes-charts.storage.googleapis.com + rancher- https://releases.rancher.com/server-charts/ + ``` + + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + + +1. Fetch the latest chart to install Rancher from the Helm chart repository. + + This command will pull down the latest charts and save it in the current directory as a `.tgz` file. + + ```plain + helm fetch rancher-/rancher + ``` + +### C. Upgrade Rancher + +This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. + +{{% tabs %}} +{{% tab "Kubernetes Upgrade" %}} + +Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. + +``` +helm get values rancher + +hostname: rancher.my.org +``` + +> **Note:** There will be more values that are listed with this command. 
This is just an example of one of the values. + +If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow `Option B: Reinstalling Rancher`. Otherwise, follow `Option A: Upgrading Rancher`. + +{{% accordion label="Option A: Upgrading Rancher" %}} + +Upgrade Rancher to the latest version with all your settings. + +Take all the values from the previous step and append them to the command using `--set key=value`: + +``` +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +> **Note:** There will be many more options from the previous step that need to be appended. + +{{% /accordion %}} + +{{% accordion label="Option B: Reinstalling Rancher chart" %}} + +If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. + +Please refer the [Upgrading Cert-Manager]({{< baseurl >}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page for more information. + +1. Uninstall Rancher + + ``` + helm delete rancher -n cattle-system + ``` + +2. Reinstall Rancher to the latest version with all your settings. Take all the values from the previous step and append them to the command using `--set key=value`. Note: There will be many more options from the previous step that need to be appended. + + ``` + helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org + ``` + +{{% /accordion %}} + +{{% /tab %}} + +{{% tab "Kubernetes Air Gap Upgrade" %}} + +1. Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + Based on the choice you made during installation, complete one of the procedures below. + + Placeholder | Description + ------------|------------- + `` | The version number of the output tarball. + `` | The DNS name you pointed at your load balancer. + `` | The DNS name for your private registry. + `` | Cert-manager version running on k8s cluster. + +{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} + + ```plain +helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +{{% /accordion %}} +{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} + +>**Note:** If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`. + +```plain +helm template ./rancher-.tgz --output-dir . \ +--name rancher \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +{{% /accordion %}} + +2. 
Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. + + Use `kubectl` to apply the rendered manifests. + + ```plain + kubectl -n cattle-system apply -R -f ./rancher + ``` + +{{% /tab %}} +{{% /tabs %}} + +### D. Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +>**Having network issues following upgrade?** +> +> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). + +## Rolling Back + +Should something go wrong, follow the [roll back]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md new file mode 100644 index 00000000000..1c717cd0a03 --- /dev/null +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha/helm2/_index.md @@ -0,0 +1,199 @@ +--- +title: Upgrading Rancher Installed on Kubernetes with Helm 2 +weight: 1050 +--- + +> After Helm 3 was released, the [instructions for upgrading Rancher on a Kubernetes cluster](./ha) were updated to use Helm 3. +> +> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. +> +> This section provides a copy of the older instructions for upgrading Rancher with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +The following instructions will guide you through using Helm to upgrade a Rancher server that is installed on a Kubernetes cluster. + +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. + +If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). + +>**Notes:** +> +> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) +> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) +> - The upgrade instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.x/en/upgrades/helm2) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. 
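If you are unsure whether your existing installation was managed with Helm 2 or Helm 3, checking the client version you have been using is a quick first step. A minimal sketch; Helm 2 also reports the Tiller server version, while Helm 3 only reports a client version:

```
helm version --short
```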
+ +# Prerequisites + +- **Review the [known upgrade issues]({{}}/rancher/v2.x/en/upgrades/upgrades/#known-upgrade-issues) and [caveats]({{}}/rancher/v2.x/en/upgrades/upgrades/#caveats)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) +- **For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. + +# Upgrade Outline + +Follow the steps to upgrade Rancher server: + +- [A. Back up your Kubernetes cluster that is running Rancher server](#a-backup-your-kubernetes-cluster-that-is-running-rancher-server) +- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) +- [C. Upgrade Rancher](#c-upgrade-rancher) +- [D. Verify the Upgrade](#d-verify-the-upgrade) + +### A. Back up Your Kubernetes Cluster that is Running Rancher Server + +[Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) +of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restoration point if something goes wrong during upgrade. + +### B. Update the Helm chart repository + +1. Update your local helm repo cache. + + ``` + helm repo update + ``` + +1. Get the repository name that you used to install Rancher. + + For information about the repos and their differences, see [Helm Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#helm-chart-repositories). + + {{< release-channel >}} + + ``` + helm repo list + + NAME URL + stable https://kubernetes-charts.storage.googleapis.com + rancher- https://releases.rancher.com/server-charts/ + ``` + + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + + +1. Fetch the latest chart to install Rancher from the Helm chart repository. + + This command will pull down the latest charts and save it in the current directory as a `.tgz` file. + + ```plain + helm fetch rancher-/rancher + ``` + +### C. Upgrade Rancher + +This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. + +{{% tabs %}} +{{% tab "Kubernetes Upgrade" %}} + +Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. + +``` +helm get values rancher + +hostname: rancher.my.org +``` + +> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. + +If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow `Option B: Reinstalling Rancher`. Otherwise, follow `Option A: Upgrading Rancher`. 
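If you do not know which cert-manager version is currently deployed, you can read it from the image tag of the running deployment. A minimal sketch, assuming cert-manager runs in the `cert-manager` namespace; older instructions deployed it to `kube-system`, so adjust the namespace if needed:

```
kubectl get deployment -n cert-manager cert-manager \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```

If the reported tag is older than v0.11.0 and you also plan to upgrade cert-manager, follow `Option B: Reinstalling Rancher chart`.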
+ +{{% accordion label="Option A: Upgrading Rancher" %}} + +Upgrade Rancher to the latest version with all your settings. + +Take all the values from the previous step and append them to the command using `--set key=value`. Note: There will be many more options from the previous step that need to be appended. + +``` +helm upgrade rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` +{{% /accordion %}} + +{{% accordion label="Option B: Reinstalling Rancher chart" %}} + +If you are currently running the cert-manger whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manger due to the API change in cert-manger v0.11. + +Please refer the [Upgrading Cert-Manager]({{< baseurl >}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) page for more information. + +1. Uninstall Rancher + + ``` + helm delete rancher -n cattle-system + ``` + +2. Reinstall Rancher to the latest version with all your settings. Take all the values from the previous step and append them to the command using `--set key=value`. Note: There will be many more options from the previous step that need to be appended. + + ``` + helm install rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org + ``` + +{{% /accordion %}} + +{{% /tab %}} + +{{% tab "Kubernetes Air Gap Upgrade" %}} + +1. Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + Based on the choice you made during installation, complete one of the procedures below. + + Placeholder | Description + ------------|------------- + `` | The version number of the output tarball. + `` | The DNS name you pointed at your load balancer. + `` | The DNS name for your private registry. + `` | Cert-manager version running on k8s cluster. + +{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} + + ```plain +helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +{{% /accordion %}} +{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} + +>**Note:** If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`. + +```plain +helm template ./rancher-.tgz --output-dir . \ +--name rancher \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +{{% /accordion %}} + +2. Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. + + Use `kubectl` to apply the rendered manifests. 
+ + ```plain + kubectl -n cattle-system apply -R -f ./rancher + ``` + +{{% /tab %}} +{{% /tabs %}} + +### D. Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +>**Having network issues following upgrade?** +> +> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). + +## Rolling Back + +Should something go wrong, follow the [roll back]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md index d8065508c90..c5e8091bdba 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md @@ -1,14 +1,14 @@ --- -title: Migrating from an HA RKE Add-on Install +title: Migrating from a Kubernetes Install with an RKE Add-on weight: 1030 aliases: - /rancher/v2.x/en/upgrades/ha-server-upgrade/ - /rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/ --- -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->If you are currently using the RKE add-on install method, please follow these directions to migrate to the Helm install. +>If you are currently using the RKE add-on install method, please follow these directions to migrate to the Helm install. The following instructions will help guide you through migrating from the RKE Add-on install to managing Rancher with the Helm package manager. @@ -17,6 +17,8 @@ You will need the to have [kubectl](https://kubernetes.io/docs/tasks/tools/insta > **Note:** This guide assumes a standard Rancher install. If you have modified any of the object names or namespaces, please adjust accordingly. +> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. + ### Point kubectl at your Rancher Cluster Make sure `kubectl` is using the correct kubeconfig YAML file. Set the `KUBECONFIG` environmental variable to point to `kube_config_rancher-cluster.yml`: @@ -55,7 +57,7 @@ kubectl -n cattle-system get secret cattle-keys-server -o jsonpath --template='{ Remove the Kubernetes objects created by the RKE install. -> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-HA Install]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups) for details. +> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups) for details. ``` kubectl -n cattle-system delete ingress cattle-ingress-http @@ -103,5 +105,5 @@ addons: |- From here follow the standard install steps. 
-* [3 - Initialize Helm (Install tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) -* [4 - Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/) +* [3 - Initialize Helm]({{< baseurl >}}/rancher/v2.x/en/installation/options/helm2/helm-init/) +* [4 - Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md index ad7b5398ec6..2d85fdad4d6 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/namespace-migration/_index.md @@ -1,7 +1,6 @@ --- title: Upgrading to v2.0.7+ — Namespace Migration -weight: -aliases: +weight: 1040 --- >This section applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. @@ -26,6 +25,8 @@ During upgrades from Rancher v2.0.6- to Rancher v2.0.7+, all system namespaces a - To prevent this issue from occurring before the upgrade, see [Preventing Cluster Networking Issues](#preventing-cluster-networking-issues). - To fix this issue following upgrade, see [Restoring Cluster Networking](#restoring-cluster-networking). +> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.x/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. + ## Preventing Cluster Networking Issues You can prevent cluster networking issues from occurring during your upgrade to v2.0.7+ by unassigning system namespaces from all of your Rancher projects. Complete this task if you've assigned any of a cluster's system namespaces into a Rancher project. @@ -66,7 +67,7 @@ Reset the cluster nodes' network policies to restore connectivity. >Download and setup [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). {{% tabs %}} -{{% tab "HA Install" %}} +{{% tab "Kubernetes Install" %}} 1. From **Terminal**, change directories to your kubectl file that's generated during Rancher install, `kube_config_rancher-cluster.yml`. This file is usually in the directory where you ran RKE during Rancher installation. 1. Before repairing networking, run the following two commands to make sure that your nodes have a status of `Ready` and that your cluster components are `Healthy`. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/single-node-air-gap-upgrade/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/single-node-air-gap-upgrade/_index.md deleted file mode 100644 index 56a210ad6e1..00000000000 --- a/content/rancher/v2.x/en/upgrades/upgrades/single-node-air-gap-upgrade/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Single Node Upgrade - Air Gap -weight: 1011 -aliases: - - /rancher/v2.x/en/upgrades/air-gap-upgrade/ ---- -To upgrade an air gapped Rancher Server, update your private registry with the latest Docker images, and then run the upgrade command. - -## Prerequisites -**Upgrades to v2.0.7+ only:** Starting in v2.0.7, Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. 
Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). - -**Upgrades to v2.2.0 only: mirror system-charts repository and configure Rancher**
-Starting in v2.2.0, Rancher introduced the [System Charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-single-node/config-rancher-system-charts/). - -## Caveats -Upgrades _to_ or _from_ any tag containing [alpha]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#server-tags) aren't supported. - -## Upgrading An Air Gapped Rancher Server - -1. Follow the directions in Air Gap Installation to [pull the Docker images]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/#release-files) required for the new version of Rancher. - -2. Follow the directions in [Single Node Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/single-node-upgrade/) to complete upgrade of your air gapped Rancher Server. - - >**Note:** - > While completing [Single Node Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/single-node-upgrade/), prepend your private registry URL to the image when running the `docker run` command. - > - > Example: `/rancher/rancher:latest` - -**Result:** Rancher is upgraded. Log back into Rancher to confirm that the upgrade succeeded. - ->**Having Network Issues Following Upgrade?** -> -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back -If your upgrade does not complete successfully, you can roll Rancher Server and its data back to its last healthy state. For more information, see [Single Node Rollback]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/). diff --git a/content/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/_index.md deleted file mode 100644 index a9a9f1d1226..00000000000 --- a/content/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/_index.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Single Node Upgrade -weight: 1010 -aliases: - - /rancher/v2.x/en/upgrades/single-node-upgrade/ ---- -To upgrade Rancher Server 2.x when a new version is released, create a data container for your current Rancher deployment, pull the latest image of Rancher, and then start a new Rancher container using your data container. - -## Before You Start - -During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - - -``` -docker run --volumes-from rancher-data -v $PWD:/backup busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `-` is the version number and date of creation for a backup of Rancher. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#completing-the-upgrade). 
- -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{< baseurl >}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | -| `` | `2018-12-19` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Prerequisites -**Upgrades to v2.0.7+ only:** Starting in v2.0.7, Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#preventing-cluster-networking-issues). - -## Caveats -Upgrades _to_ or _from_ any tag containing [alpha]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#server-tags) aren't supported. - -## Completing the Upgrade - -During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#before-you-start). - - ``` - docker stop - ``` - -1. Use the command below, replacing each [placeholder](#before-you-start), to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup--.tar.gz`). - - This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each [placeholder](#before-you-start). - - - ``` - docker run --volumes-from rancher-data -v $PWD:/backup busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** When you enter this command, a series of commands should run. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - - ``` - [rancher@ip-10-0-0-50 ~]$ ls - rancher-data-backup-v2.1.3-20181219.tar.gz - ``` - -1. Move your backup tarball to a safe location external from your Rancher Server. - - -1. Pull the most recent image of Rancher. - - ``` - docker pull rancher/rancher:latest - ``` - - >**Attention Air Gap Users:** - > If you are visiting this page to complete [Air Gap Upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/air-gap-upgrade), prepend your private registry URL to the image when running the `docker run` command. - > - > Example: `/rancher/rancher:latest` - > - -1. Start a new Rancher Server container using the data from the `rancher-data` container. - - ``` - docker run -d --volumes-from rancher-data --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher:latest - ``` - - >**Attention Let’s Encrypt Users:** - > - >Remember to append `--acme-domain ` to the run command, otherwise Rancher will fall back to using self signed certificates. 
- >``` - >docker run -d --volumes-from rancher-data --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher:latest --acme-domain - >``` - - - >**Want records of all transactions with the Rancher API?** - > - >Enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your upgrade command. - >``` - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - ``` - - >**Note:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. - >
- >
- >**Note:** After upgrading Rancher Server, data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. - -1. Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. - - - -1. Remove the previous Rancher Server container. - - If you only stop the previous Rancher Server container (and don't remove it), the container may restart after the next server reboot. - -**Result:** Rancher is upgraded. Log back into Rancher to confirm that the upgrade succeeded. - ->**Having Network Issues Following Upgrade?** -> -> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back - -If your upgrade does not complete successfully, you can roll Rancher Server and its data back to its last healthy state. For more information, see [Single Node Rollback]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/). diff --git a/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md new file mode 100644 index 00000000000..6c5581e8f0b --- /dev/null +++ b/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md @@ -0,0 +1,320 @@ +--- +title: Upgrading Rancher Installed with Docker +weight: 1010 +aliases: + - /rancher/v2.x/en/upgrades/single-node-upgrade/ + - /rancher/v2.x/en/upgrades/upgrades/single-node-air-gap-upgrade +--- + +The following instructions will guide you through upgrading a Rancher server that was installed with Docker. + +# Prerequisites + +- **Review the [known upgrade issues]({{}}/rancher/v2.x/en/upgrades/upgrades/#known-upgrade-issues) and [caveats]({{}}/rancher/v2.x/en/upgrades/upgrades/#caveats)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) +- **For [air gap installs only,]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. + +# Placeholder Review + +During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). + +Here's an **example** of a command with a placeholder: + +``` +docker stop +``` + +In this command, `` is the name of your Rancher container. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the upgrade. + +Terminal `docker ps` Command, Displaying Where to Find `` and `` +![Placeholder Reference]({{< baseurl >}}/img/rancher/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. 
| +| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | +| `` | `2018-12-19` | The date that the data container or backup was created. | +
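For reference, a hypothetical `docker ps` listing is sketched below; the image tag (`v2.1.3`) and container name (`festive_mestorf`) match the example values in the table above, while the container ID and timestamps are purely illustrative.

```
CONTAINER ID   IMAGE                    COMMAND           CREATED        STATUS        PORTS                                      NAMES
31aa94998425   rancher/rancher:v2.1.3   "entrypoint.sh"   5 months ago   Up 5 months   0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp   festive_mestorf
```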
+ +You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +# Upgrade Outline + +During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: + +- [A. Create a copy of the data from your Rancher server container](#a-create-a-copy-of-the-data-from-your-rancher-server-container) +- [B. Create a backup tarball](#b-create-a-backup-tarball) +- [C. Pull the new Docker image](#c-pull-the-new-docker-image) +- [D. Start the new Rancher server container](#d-start-the-new-rancher-server-container) +- [E. Verify the Upgrade](#e-verify-the-upgrade) +- [F. Clean up your old Rancher server container](#f-clean-up-your-old-rancher-server-container) + +### A. Create a copy of the data from your Rancher server container + +1. Using a remote Terminal connection, log into the node running your Rancher server. + +1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data rancher/rancher: + ``` + +### B. Create a backup tarball + +1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup--.tar.gz`). + + This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each [placeholder](#before-you-start). + + + ``` + docker run --volumes-from rancher-data -v $PWD:/backup busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** When you enter this command, a series of commands should run. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + + ``` + [rancher@ip-10-0-0-50 ~]$ ls + rancher-data-backup-v2.1.3-20181219.tar.gz + ``` + +1. Move your backup tarball to a safe location external from your Rancher server. + +### C. Pull the New Docker Image + +Pull the image of the Rancher version that you want to upgrade to. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. + +``` +docker pull rancher/rancher: +``` + +### D. Start the New Rancher Server Container + +Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. + +>**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
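Since the new container must be started with the same settings as the original, it can help to check how the original container was launched before writing the new `docker run` command. A sketch using the standard Docker CLI (replace `<RANCHER_CONTAINER_NAME>` with the name of your Rancher container):

```
# Environment variables the original Rancher container was started with
docker inspect <RANCHER_CONTAINER_NAME> --format '{{ .Config.Env }}'

# Arguments passed to the container, such as --acme-domain or --no-cacerts
docker inspect <RANCHER_CONTAINER_NAME> --format '{{ .Config.Cmd }}'
```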
+ +If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/) + +If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) + +If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) + +To see the command to use when starting the new Rancher server container, choose from the following options: + +- Docker Upgrade +- Docker Upgrade for Air Gap Installs + +{{% tabs %}} +{{% tab "Docker Upgrade" %}} + +Select which option you had installed Rancher server + +{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher: +``` + +{{% /accordion %}} + +{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +Placeholder | Description +------------|------------- + `` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher: +``` + +{{% /accordion %}} +{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. 
+ +>**Reminder of the Cert Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + rancher/rancher: \ + --no-cacerts +``` +{{% /accordion %}} +{{% accordion id="option-d" label="Option D-Let's Encrypt Certificate" %}} + +>**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. + +>**Reminder of the Cert Prerequisites:** +> +>- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +>- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. +`` | The domain address that you had originally started with + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher: \ + --acme-domain +``` + +{{% /accordion %}} + +{{% /tab %}} +{{% tab "Docker Air Gap Upgrade" %}} + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> For Rancher versions from v2.2.0 to v2.2.x, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher prior to v2.3.0.]({{}}/rancher/v2.x/en/installation/options/local-system-charts/#setting-up-system-charts-for-rancher-prior-to-v2-3-0) + +When starting the new Rancher server container, choose from the following options: + +{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. 
+ +Placeholder | Description +------------|------------- +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to to upgrade to. + +``` + docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` +{{% /accordion %}} + +{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` +{{% /accordion %}} + +{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. + + >**Reminder of the Prerequisite:** The certificate files must be in [PEM format]({{}}/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/#pem). In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [SSL FAQ / Troubleshooting](#cert-order). + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version]({{< baseurl >}}/rancher/v2.x/en/installation/options/server-tags/) that you want to upgrade to. 
+ +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` +{{% /accordion %}} +{{% /tab %}} +{{% /tabs %}} + +**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. + +### E. Verify the Upgrade + +Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. + +>**Having network issues in your user clusters following upgrade?** +> +> See [Restoring Cluster Networking]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/namespace-migration/#restoring-cluster-networking). + + +### F. Clean up Your Old Rancher Server Container + +Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. + +## Rolling Back + +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{< baseurl >}}/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/). diff --git a/content/rancher/v2.x/en/user-settings/api-keys/_index.md b/content/rancher/v2.x/en/user-settings/api-keys/_index.md index 3afd1ff05f7..a824b0d58f5 100644 --- a/content/rancher/v2.x/en/user-settings/api-keys/_index.md +++ b/content/rancher/v2.x/en/user-settings/api-keys/_index.md @@ -29,7 +29,7 @@ API Keys are composed of four components: The API key won't be valid after expiration. Shorter expiration periods are more secure. - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{< baseurl >}}/v2.x/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) for more information. + A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{< baseurl >}}/rancher/v2.x/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. 4. Click **Create**. diff --git a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md index 9c10255eba0..90112383200 100644 --- a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md @@ -7,6 +7,10 @@ Service discovery is one of the core functionalities of any container-based envi This document will also show you how to link the workloads and services that you migrated into Rancher v2.x. 
When you parsed your services from v1.6 using migration-tools CLI, it output two files for each service: one deployment manifest and one service manifest. You'll have to link these two files together before the deployment works correctly in v2.x. +
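In Kubernetes terms, linking the two files generally means making sure the Service's label selector matches the labels on the Deployment's pod template. A minimal sketch of a linked pair (the names, labels, and image are illustrative rather than the literal migration-tools output):

```yaml
# web-deployment.yml (sketch)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web          # the Service below selects pods by this label
    spec:
      containers:
      - name: web
        image: nginx
---
# web-service.yml (sketch)
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  selector:
    app: web              # must match the pod labels above
  ports:
  - port: 80
    targetPort: 80
```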
Resolve the output.txt Link Directive
+ +![Resolve Link Directive]({{< baseurl >}}/img/rancher/resolve-links.png) + ## In This Document @@ -58,14 +62,22 @@ When you migrate v1.6 services to v2.x, Rancher does not automatically create a In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are linked together. +
Linked Workload and Kubernetes Service
+ +![Linked Workload and Kubernetes Service]({{< baseurl >}}/img/rancher/linked-service-workload.png) + + ### Service Name Alias Creation Just as you can create an alias for Rancher v1.6 services, you can do the same for Rancher v2.x workloads. Similarly, you can also create DNS records pointing to services running externally, using either their hostname or IP address. These DNS records are Kubernetes service objects. -Using the v2.x UI, use the context menu to navigate to the `Project` view and choose the **Service Discovery** tab. All existing DNS records created for your workloads are listed under each namespace. +Using the v2.x UI, use the context menu to navigate to the `Project` view. Then click **Resources > Workloads > Service Discovery.** (In versions prior to v2.3.0, click the **Workloads > Service Discovery** tab.) All existing DNS records created for your workloads are listed under each namespace. Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods. +
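Because these DNS records are Kubernetes service objects, a record that points to an external hostname corresponds to a Service of type `ExternalName`. A minimal sketch (the record name and target hostname are illustrative):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: external-db
  namespace: default
spec:
  type: ExternalName
  externalName: db.example.com   # requests to external-db resolve to this hostname
```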
Add Service Discovery Record
+![Add Service Discovery Record]({{< baseurl >}}/img/rancher/add-record.png) + The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. Option | Kubernetes-implemented? | Rancher-implemented? diff --git a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md index 5315469581a..35c81ae5e58 100644 --- a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md @@ -61,7 +61,7 @@ For example, for the web-deployment.yml file parsed from v1.6 that we've been us
Port Mapping: Setting HostPort
-![Set HostPort]({{< baseurl >}}/img/rancher/set-hostport.gif) +{{< img "/img/rancher/set-hostport.gif" "Set HostPort">}} ## NodePort @@ -99,6 +99,6 @@ For example, for the `web-deployment.yml` file parsed from v1.6 that we've been
Port Mapping: Setting NodePort
-![Set NodePort]({{< baseurl >}}/img/rancher/set-nodeport.gif) +{{< img "/img/rancher/set-nodeport.gif" "Set NodePort" >}} ### [Next: Configure Health Checks]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/monitor-apps) diff --git a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md index f5a1b6147d9..4d4f2d9ad40 100644 --- a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md @@ -26,20 +26,24 @@ Before installing v2.x, provision one host or more to function as your Rancher S After provisioning your node(s), install Rancher: -- [Single Node Install]({{< baseurl >}}/rancher/v2.x/en/installation/single-node) +- [Docker Install]({{}}/rancher/v2.x/en/installation/single-node) - For development environments, we recommend a single node install. This installation procedure deploys a single Rancher container to your host. + For development environments, Rancher can be installed on a single node using Docker. This installation procedure deploys a single Rancher container to your host. -- [High Availability (HA) Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) +- [Kubernetes Install]({{}}/rancher/v2.x/en/installation/k8s-install/) - For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability (HA) configuration. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. + For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability Kubernetes installation. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. - >**Important Difference:** Although you could install Rancher v1.6 in an HA configuration using an external database and a Docker command on each node, Rancher v2.x in an HA configuration requires an existing Kubernetes cluster. Review [High Availability (HA) Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) for full requirements. + >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install]({{< baseurl >}}/rancher/v2.x/en/installation/k8s-install/) for full requirements. ## B. Configure Authentication After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication). +
Rancher v2.x Authentication
+ +![Rancher v2.x Authentication]({{< baseurl >}}/img/rancher/auth-providers.svg) + ### Local Users Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) and assign them access rights. diff --git a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md index c34f46f1cca..6885d6794a1 100644 --- a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md @@ -9,6 +9,10 @@ As outlined in [its documentation]({{< baseurl >}}/rancher/v1.6/en/cattle/adding If you encounter the `output.txt` text below after parsing your v1.6 Compose files to Kubernetes manifests, you'll have to resolve it by manually creating a load balancer in v2.x. +
output.txt Load Balancer Directive
+ +![Resolve Load Balancer Directive]({{< baseurl >}}/img/rancher/resolve-load-balancer.png) + ## In This Document @@ -26,7 +30,7 @@ If you encounter the `output.txt` text below after parsing your v1.6 Compose fil By default, Rancher v2.x replaces the v1.6 load balancer microservice with the native [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), which is backed by NGINX Ingress Controller for layer 7 load balancing. By default, Kubernetes Ingress only supports the HTTP and HTTPS protocols, not TCP. Load balancing is limited to these two protocols when using Ingress. -> **TCP Required?** See [TCP Load Balancing Optionsl](#tcp-load-balancing-options) +> **TCP Required?** See [TCP Load Balancing Options](#tcp-load-balancing-options) ## Load Balancer Deployment @@ -35,7 +39,7 @@ In Rancher v1.6, you could add port/service rules for configuring your HAProxy t Rancher v2.x offers similar functionality, but load balancing is instead handled by Ingress. An Ingress is a specification of rules that a controller component applies to your load balancer. The actual load balancer can run outside of your cluster or within it. -By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only, but you can read about cloud providers in [our documentation]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/). +By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only. RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. @@ -49,8 +53,16 @@ In Rancher v1.6 you could deploy a scalable load balancer service within your st +
Rancher v1.6 Load Balancing Architecture
+ +![Rancher v1.6 Load Balancing]({{< baseurl >}}/img/rancher/cattle-load-balancer.svg) + The Rancher v2.x Ingress Controller is a DaemonSet, so it is globally deployed on all schedulable nodes to serve your entire Kubernetes cluster. Therefore, when you program the Ingress rules, you must use a unique hostname and path to point to your workloads, as the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads. +
Rancher v2.x Load Balancing Architecture
+ +![Rancher v2.x Load Balancing]({{< baseurl >}}/img/rancher/kubernetes-load-balancer.svg) + ## Ingress Caveats Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balancing, you must use unique host names and paths when configuring your workloads. This limitation derives from: @@ -62,12 +74,18 @@ Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balan ## Deploying Ingress -You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Workloads** from the main menu. Then choose the **Load Balancing** tab and begin by clicking **Deploy**. During deployment, you can choose a target project or namespace. +You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Resources > Workloads > Load Balancing.** (In versions prior to v2.3.0, click **Workloads > Load Balancing.**) Then click **Deploy**. During deployment, you can choose a target project or namespace. >**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. > -For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and then select the **Load Balancing** tab. Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects. +![Workload Scale]({{< baseurl >}}/img/rancher/workload-scale.png) + +For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions prior to v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects. + +
Browsing to Load Balancer Tab and Adding Ingress
+ +![Adding Ingress]({{< baseurl >}}/img/rancher/add-ingress.gif) Similar to the service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. The sections below demonstrate how to create Ingress rules. @@ -77,8 +95,16 @@ Using Rancher v2.x, you can add Ingress rules that are based on host names or a For example, let's say you have multiple workloads deployed to a single namespace. You can add an Ingress to route traffic to these two workloads using the same hostname but different paths, as depicted in the image below. URL requests to `foo.com/name.html` will direct users to the `web` workload, and URL requests to `foo.com/login` will direct users to the `chat` workload. +
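A sketch of the Ingress behind this example is shown below. The backing service names (`web`, `chat`) and port are assumptions based on the description above, and the exact `apiVersion` depends on your Kubernetes version:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web-and-chat
  namespace: default
spec:
  rules:
  - host: foo.com
    http:
      paths:
      - path: /name.html
        backend:
          serviceName: web      # requests to foo.com/name.html go to the web workload
          servicePort: 80
      - path: /login
        backend:
          serviceName: chat     # requests to foo.com/login go to the chat workload
          servicePort: 80
```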
Ingress: Path-Based Routing Configuration
+ +![Ingress: Path-Based Routing Configuration]({{< baseurl >}}/img/rancher/add-ingress-form.png) + Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address. +
Workload Links
+ +![Load Balancer Links to Workloads]({{< baseurl >}}/img/rancher/load-balancer-links.png) + The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use `kubectl` command below to see the Ingress address assigned by the controller: ``` @@ -92,6 +118,10 @@ Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want - We recommend [uploading a certificate]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. - If you have configured [NGINX default certificate]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. +
Load Balancer Configuration: SSL/TLS Certificate Section
+ +![SSL/TLS Certificates Section]({{< baseurl >}}/img/rancher/load-balancer-ssl-certs.png) + ### TCP Load Balancing Options #### Layer-4 Load Balancer @@ -100,6 +130,10 @@ For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`. +
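Behind the scenes, this entry corresponds to a Kubernetes Service of type `LoadBalancer`. A rough sketch for the `myapp` example (the selector and ports are illustrative; Rancher generates its own workload selector labels):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp-loadbalancer
  namespace: default
spec:
  type: LoadBalancer
  selector:
    app: myapp        # illustrative; Rancher sets its own workload selector label
  ports:
  - port: 80
    targetPort: 80
```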
Workload Deployment: Layer 4 Load Balancer Creation
+ +![Deploy Layer-4 Load Balancer]({{< baseurl >}}/img/rancher/deploy-workload-load-balancer.png) + Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. #### NGINX Ingress Controller TCP Support by ConfigMaps @@ -110,6 +144,8 @@ However, there is a workaround to use NGINX's TCP balancing by creating a Kubern To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. +![Layer-4 Load Balancer: ConfigMap Workaround]({{< baseurl >}}/img/rancher/layer-4-lb-config-map.png) + The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. ## Rancher v2.x Load Balancing Limitations @@ -121,6 +157,6 @@ Cattle provided feature-rich load balancer support that is [well documented]({{< - Only ports 80 and 443 can be configured for HTTP/HTTPS routing via Ingress. Also Ingress Controller is deployed globally as a DaemonSet and not launched as a scalable service. Also, users cannot assign random external ports to be used for balancing. Therefore, users need to ensure that they configure unique hostname/path combinations to avoid routing conflicts using the same two ports. - There is no way to specify port rule priority and ordering. - Rancher v1.6 added support for draining backend connections and specifying a drain timeout. This is not supported in Rancher v2.x. -- There is no support for specifying a custom stickiness policy and a custom load balancer config to be appended to the default config as of now in Rancher v2.x. There is some support, however, available in native Kubernetes for customizing the NGINX configuration as noted in the [NGINX Ingress Controller Custom Conguration Documentation](https://kubernetes.github.io/ingress-nginx/examples/customization/custom-configuration/). +- There is no support for specifying a custom stickiness policy and a custom load balancer config to be appended to the default config as of now in Rancher v2.x. There is some support, however, available in native Kubernetes for customizing the NGINX configuration as noted in the [NGINX Ingress Controller Custom Configuration Documentation](https://kubernetes.github.io/ingress-nginx/examples/customization/custom-configuration/). ### Finished! diff --git a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md index ad16e2cac2c..c9ea17668c4 100644 --- a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md @@ -11,6 +11,10 @@ Use this document to correct Rancher v2.x workloads and services that list `heal For example, for the image below, we would configure liveness probes for the `web` and `weblb` workloads (i.e., the Kubernetes manifests output by migration-tools CLI). +
Resolve health_check for the web and webLB Workloads
+ +![Resolve health_check]({{< baseurl >}}/img/rancher/resolve-health-checks.png) + ## In This Document @@ -37,9 +41,11 @@ The health check microservice features two types of health checks, which have a The following diagram displays the health check microservice evaluating a container running Nginx. Notice that the microservice is making its check across nodes. +![Rancher v1.6 Health Checks]({{}}/img/rancher/healthcheck.svg) + ## Rancher v2.x Health Checks -In Rancher v2.x, the health check microservice is replaced with Kubernete's native health check mechanisms, called _probes_. These probes, similar to the Rancher v1.6 health check microservice, monitor the health of pods over TCP and HTTP. +In Rancher v2.x, the health check microservice is replaced with Kubernetes's native health check mechanisms, called _probes_. These probes, similar to the Rancher v1.6 health check microservice, monitor the health of pods over TCP and HTTP. However, probes in Rancher v2.x have some important differences, which are described below. For full details about probes, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes). @@ -63,6 +69,8 @@ Kubernetes includes two different _types_ of probes: liveness checks and readine The following diagram displays kubelets running probes on containers they are monitoring ([kubelets](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) are the primary "agent" running on each node). The node on the left is running a liveness probe, while the one on the right is running a readiness check. Notice that the kubelet is scanning containers on its host node rather than across nodes, as in Rancher v1.6. +![Rancher v2.x Probes]({{}}/img/rancher/probes.svg) + ## Configuring Probes in Rancher v2.x The [migration-tool CLI]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files to Kubernetes manifests. Therefore, if you want to add health checks to your Rancher v2.x workloads, you'll have to add them manually. @@ -73,6 +81,10 @@ If the probe fails, the container is restarted per the restartPolicy defined in Configure probes by using the **Health Check** section while editing deployments called out in `output.txt`. +
Edit Deployment: Health Check Section
+ +![Health Check Section]({{< baseurl >}}/img/rancher/health-check-section.png) + ### Configuring Checks While you create a workload using Rancher v2.x, we recommend configuring a check that monitors the health of the deployment's pods. @@ -85,6 +97,8 @@ TCP checks monitor your deployment's health by attempting to open a connection t You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). +![TCP Check]({{}}/img/rancher/readiness-check-tcp.png) + When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. +
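A sketch of what this adds to the container spec in the deployment's manifest, with illustrative port and timing values:

```yaml
readinessProbe:
  tcpSocket:
    port: 80
  initialDelaySeconds: 10
  periodSeconds: 2
  timeoutSeconds: 2
  successThreshold: 2
  failureThreshold: 3
livenessProbe:
  tcpSocket:
    port: 80
  initialDelaySeconds: 10
  periodSeconds: 2
  failureThreshold: 3
```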
Rancher v2.x: Workload Deployment
+ +![Workload Tab and Group by Node Icon]({{< baseurl >}}/img/rancher/schedule-specific-node.png) + Rancher schedules pods to the node you select if 1) there are compute resources available on the node and 2) there are no port conflicts when you've configured port mapping to use the HostPort option. If you expose the workload using a NodePort that conflicts with another workload, the deployment gets created successfully, but no NodePort service is created. Therefore, the workload isn't exposed outside of the cluster. -After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the **Workloads** tab, click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. +After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. + +![Pods Scheduled to Same Node]({{< baseurl >}}/img/rancher/scheduled-nodes.png) + ). A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. To create a DaemonSet while configuring a workload, choose **Run one pod on each node** from the **Workload Type** options. +
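A sketch of the kind of DaemonSet this option produces, including a `nodeSelector` that limits it to nodes with a specific label (names, image, and label are illustrative):

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: monitoring-agent
spec:
  selector:
    matchLabels:
      app: monitoring-agent
  template:
    metadata:
      labels:
        app: monitoring-agent
    spec:
      nodeSelector:
        region: us-east     # only nodes with this label run a pod
      containers:
      - name: agent
        image: nginx
```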
Workload Configuration: Choose run one pod on each node to configure daemonset
+ +![choose Run one pod on each node]({{< baseurl >}}/img/rancher/workload-type.png) + ### Scheduling Pods Using Resource Constraints While creating a service in the Rancher v1.6 UI, you could schedule its containers to hosts based on hardware requirements that you choose. The containers are then scheduled to hosts based on which ones have bandwidth, memory, and CPU capacity. @@ -204,6 +238,10 @@ To declare resource constraints, edit your migrated workloads, editing the **Sec - Memory Limit - CPU Limit +
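These settings map to the standard Kubernetes resource requests and limits on the container spec. A sketch with illustrative values:

```yaml
resources:
  requests:          # reservations in the Rancher UI
    memory: "128Mi"
    cpu: "250m"
  limits:
    memory: "256Mi"
    cpu: "500m"
```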
Scheduling: Resource Constraint Settings
+ +![Resource Constraint Settings]({{< baseurl >}}/img/rancher/resource-constraint-settings.png) + You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). ### [Next: Service Discovery]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/discover-services/) diff --git a/content/rke/latest/en/_index.md b/content/rke/latest/en/_index.md index 9d5fa4cf5a9..9ff72d7c3a2 100644 --- a/content/rke/latest/en/_index.md +++ b/content/rke/latest/en/_index.md @@ -1,7 +1,8 @@ --- title: Overview of RKE shortTitle: RKE +description: RKE solves Kubernetes installation complexity. With RKE, Kubernetes installation is simplified, regardless of what OSs and platforms you’re running. weight: 1 --- -Rancher Kubernetes Engine (RKE) is a light-weight Kubernetes installer that supports installation on bare-metal and virtualized servers. RKE solves a common issue in the Kubernetes community: installation complexity. With RKE, Kubernetes installation is simplified, regardless of what operating systems and platforms you're running. +Rancher Kubernetes Engine (RKE) is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. It works on bare-metal and virtualized servers. RKE solves the problem of installation complexity, a common issue in the Kubernetes community. With RKE, the installation and operation of Kubernetes is both simplified and easily automated, and it's entirely independent of the operating system and platform you're running. As long as you can run a supported version of Docker, you can deploy and run Kubernetes with RKE. diff --git a/content/rke/latest/en/config-options/_index.md b/content/rke/latest/en/config-options/_index.md index ce568009de1..ecf29f2a412 100644 --- a/content/rke/latest/en/config-options/_index.md +++ b/content/rke/latest/en/config-options/_index.md @@ -1,5 +1,6 @@ --- -title: Config Options +title: Kubernetes Configuration Options +description: There are a lot of different Kubernetes Configuration options you can choose from when setting up your cluster.yml for RKE weight: 200 --- @@ -25,7 +26,9 @@ There are several options that can be configured in cluster configuration option * [External Etcd]({{< baseurl >}}/rke/latest/en/config-options/services/external-etcd/) * [Authentication]({{< baseurl >}}/rke/latest/en/config-options/authentication/) * [Authorization]({{< baseurl >}}/rke/latest/en/config-options/authorization/) +* [Rate Limiting]({{}}/rke/latest/en/config-options/rate-limiting/) * [Cloud Providers]({{< baseurl >}}/rke/latest/en/config-options/cloud-providers/) +* [Audit Log]({{}}/rke/latest/en/config-options/audit-log) * [Add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/) * [Network Plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/) * [DNS providers]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) @@ -57,40 +60,9 @@ ignore_docker_version: true ### Kubernetes Version -By default, RKE is defaulted to launch with a specific Kubernetes version. You can also select a different version of Kubernetes to install for your cluster. Each version of RKE has a specific list of supported Kubernetes versions. 
+For information on upgrading Kubernetes, refer to the [upgrade section.]({{}}/rke/latest/en/upgrades/) -You can set the Kubernetes version as follows: - -```yaml -kubernetes_version: "v1.11.6-rancher1-1" -``` - -In case both `kubernetes_version` and [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) are defined, the system images configuration will take precedence over `kubernetes_version`. - -#### Listing Supported Kubernetes Versions - -Please refer to the [release notes](https://github.com/rancher/rke/releases) of the RKE version that you are running, to find the list of supported Kubernetes versions as well as the default Kubernetes version. - -You can also list the supported versions and system images of specific version of RKE release with a quick command. - -``` -$ rke config --system-images --all - -INFO[0000] Generating images list for version [v1.13.4-rancher1-2]: -....... -INFO[0000] Generating images list for version [v1.11.8-rancher1-1]: -....... -INFO[0000] Generating images list for version [v1.12.6-rancher1-2]: -....... -``` - -#### Using an unsupported Kubernetes version - -As of v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, then RKE will error out. - -Prior to v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, the default version from the supported list is used. - -If you want to use a different version from the supported list, please use the [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) option. +Rolling back to previous Kubernetes versions is not supported. ### Prefix Path diff --git a/content/rke/latest/en/config-options/add-ons/_index.md b/content/rke/latest/en/config-options/add-ons/_index.md index 8a659431c83..a665230b268 100644 --- a/content/rke/latest/en/config-options/add-ons/_index.md +++ b/content/rke/latest/en/config-options/add-ons/_index.md @@ -3,23 +3,20 @@ title: Add-Ons weight: 260 --- -RKE supports pluggable add-ons. Add-ons are used to deploy several cluster components including: +RKE supports configuring pluggable add-ons in the cluster YML. Add-ons are used to deploy several cluster components including: * [Network plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/) * [Ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) * [DNS provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) * [Metrics Server]({{< baseurl >}}/rke/latest/en/config-options/add-ons/metrics-server/) -The images used for these add-ons under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. +These add-ons require images that can be found under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. -In addition to these pluggable add-ons, you can specify an add-on that you want deployed after the cluster deployment is complete. - -RKE only adds additional add-ons when using `rke up` multiple times. 
RKE does **not** support removing of cluster add-ons when doing `rke up` with a different list of add-ons. - -As of v0.1.8, RKE will update an add-on if it is the same name. - -Prior to v0.1.8, update any add-ons by using `kubectl edit`. +There are a few things worth noting: +* In addition to these pluggable add-ons, you can specify an add-on that you want deployed after the cluster deployment is complete. +* As of v0.1.8, RKE will update an add-on if it is the same name. +* Prior to v0.1.8, update any add-ons by using `kubectl edit`. ## Critical and Non-Critical Add-ons @@ -37,3 +34,18 @@ RKE uses Kubernetes jobs to deploy add-ons. In some cases, add-ons deployment ta ```yaml addon_job_timeout: 30 ``` + +## Add-on placement + +_Applies to v0.2.3 and higher_ + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| ------------------ | ------------------------------------------ | ------------ | ----------- | +| Calico | `beta.kubernetes.io/os:NotIn:windows` | none | - `NoSchedule:Exists`
- `NoExecute:Exists`
- `CriticalAddonsOnly:Exists` | +| Flannel | `beta.kubernetes.io/os:NotIn:windows` | none | - `operator:Exists` | +| Canal | `beta.kubernetes.io/os:NotIn:windows` | none | - `NoSchedule:Exists`
- `NoExecute:Exists`
- `CriticalAddonsOnly:Exists` | +| Weave | `beta.kubernetes.io/os:NotIn:windows` | none | - `NoSchedule:Exists`
- `NoExecute:Exists` | +| CoreDNS | `node-role.kubernetes.io/worker:Exists` | `beta.kubernetes.io/os:linux` | - `NoSchedule:Exists`
- `NoExecute:Exists`
- `CriticalAddonsOnly:Exists` | +| kube-dns | - `beta.kubernetes.io/os:NotIn:windows`
- `node-role.kubernetes.io/worker` `Exists` | none | - `NoSchedule:Exists`
- `NoExecute:Exists`
- `CriticalAddonsOnly:Exists` | +| nginx-ingress | - `beta.kubernetes.io/os:NotIn:windows`
- `node-role.kubernetes.io/worker` `Exists` | none | - `NoSchedule:Exists`
- `NoExecute:Exists` | +| metrics-server | - `beta.kubernetes.io/os:NotIn:windows`
- `node-role.kubernetes.io/worker` `Exists` | none | - `NoSchedule:Exists`
- `NoExecute:Exists` | diff --git a/content/rke/latest/en/config-options/add-ons/dns/_index.md b/content/rke/latest/en/config-options/add-ons/dns/_index.md index 636aa6aee2f..a00aa2e5a12 100644 --- a/content/rke/latest/en/config-options/add-ons/dns/_index.md +++ b/content/rke/latest/en/config-options/add-ons/dns/_index.md @@ -16,6 +16,8 @@ RKE provides the following DNS providers that can be deployed as add-ons: CoreDNS was made the default in RKE v0.2.5 when using Kubernetes 1.14 and higher. If you are using an RKE version lower than v0.2.5, kube-dns will be deployed by default. +> **Note:** If you switch from one DNS provider to another, the existing DNS provider will be removed before the new one is deployed. + # CoreDNS _Available as of v0.2.5_ diff --git a/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md b/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md index 8044305eb63..a7da4af0cd6 100644 --- a/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md +++ b/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md @@ -1,5 +1,6 @@ --- -title: Ingress Controllers +title: K8s Ingress Controllers +description: By default, RKE deploys the NGINX ingress controller. Learn how to schedule and disable default k8s ingress controllers, and how to configure NGINX controller weight: 262 --- diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md index b8f7a26ab82..cb26c78fe57 100644 --- a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md +++ b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md @@ -10,6 +10,8 @@ RKE provides the following network plug-ins that are deployed as add-ons: - Canal - Weave +> **Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn’t allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. + By default, the network plug-in is `canal`. If you want to use another network plug-in, you need to specify which network plug-in to enable at the cluster level in the `cluster.yml`. ```yaml @@ -20,7 +22,7 @@ network: The images used for network plug-ins are under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each network plug-in, but these can be overridden by changing the image tag in `system_images`. -## Disabling deployment of a network plug-in +# Disabling Deployment of a Network Plug-in You can disable deploying a network plug-in by specifying `none` to the network `plugin` directive in the cluster configuration. @@ -29,11 +31,11 @@ network: plugin: none ``` -## Network Plug-in Options +# Network Plug-in Options Besides the different images that could be used to deploy network plug-ins, certain network plug-ins support additional options that can be used to customize the network plug-in. -### Canal Network Plug-in Options +## Canal Network Plug-in Options ```yaml network: @@ -48,7 +50,7 @@ network: By setting the `canal_iface`, you can configure the interface to use for inter-host communication. 
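For example, a minimal sketch of the `network` section in `cluster.yml` that sets both Canal options (the interface name `eth1` is an assumption about your hosts):

```yaml
network:
  plugin: canal
  options:
    # Use a specific host interface for inter-host (overlay) traffic
    canal_iface: eth1
    # Flannel backend used by Canal; vxlan is the default
    canal_flannel_backend_type: vxlan
```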
The `canal_flannel_backend_type` option allows you to specify the type of [flannel backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md) to use. By default the `vxlan` backend is used. -### Flannel Network Plug-in Options +## Flannel Network Plug-in Options ```yaml network: @@ -63,7 +65,7 @@ network: By setting the `flannel_iface`, you can configure the interface to use for inter-host communication. The `flannel_backend_type` option allows you to specify the type of [flannel backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md) to use. By default the `vxlan` backend is used. -### Calico Network Plug-in Options +## Calico Network Plug-in Options ```yaml network: @@ -80,7 +82,7 @@ Calico currently only supports 2 cloud providers, AWS or GCE, which can be set u - `aws` - `gce` -### Weave Network Plug-in Options +## Weave Network Plug-in Options ```yaml network: @@ -92,3 +94,8 @@ network: #### Weave encryption Weave encryption can be enabled by passing a string password to the network provider config. + + +## Custom Network Plug-ins + +It is possible to add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. In the `addons` field, you can add the add-on manifest of a cluster that has the network plugin-that you want, as shown in [this example.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md new file mode 100644 index 00000000000..d942be998aa --- /dev/null +++ b/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md @@ -0,0 +1,207 @@ +--- +title: Custom Network Plug-in Example +weight: 1 +--- + +The below example shows how to configure a custom network plug-in with an in-line add-on to the `cluster.yml`. + +First, to edit the network plug-ins, change the `network` section of the YAML from: + +``` +network: + options: + flannel_backend_type: "vxlan" + plugin: "canal" +``` +to: +``` +network: + plugin: none +``` + +Then, in the `addons` section of the `cluster.yml`, you can add the add-on manifest of a cluster that has the network plugin-that you want. 
In the below example, we are replacing the Canal plugin with a Flannel plugin by adding the add-on manifest for the cluster through the `addons` field: + +``` +addons: |- + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel + subjects: + - kind: ServiceAccount + name: flannel + namespace: kube-system + --- + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: kube-flannel-cfg + namespace: "kube-system" + labels: + tier: node + app: flannel + data: + cni-conf.json: | + { + "name":"cbr0", + "cniVersion":"0.3.1", + "plugins":[ + { + "type":"flannel", + "delegate":{ + "forceAddress":true, + "isDefaultGateway":true + } + }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] + } + net-conf.json: | + { + "Network": "10.42.0.0/16", + "Backend": { + "Type": "vxlan" + } + } + --- + apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + name: kube-flannel + namespace: "kube-system" + labels: + tier: node + k8s-app: flannel + spec: + template: + metadata: + labels: + tier: node + k8s-app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: NotIn + values: + - windows + serviceAccountName: flannel + containers: + - name: kube-flannel + image: rancher/coreos-flannel:v0.10.0-rancher1 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 150m + memory: 64M + command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: install-cni + image: rancher/flannel-cni:v0.3.0-rancher1 + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: kube-flannel-cfg + key: cni-conf.json + - name: CNI_CONF_NAME + value: "10-flannel.conflist" + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: host-cni-bin + mountPath: /host/opt/cni/bin/ + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoExecute + - key: node.kubernetes.io/not-ready + effect: NoSchedule + operator: Exists + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: host-cni-bin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: 20% + type: RollingUpdate + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: flannel + namespace: kube-system +``` +**Result:** The cluster is up with the custom network plug-in. 
\ No newline at end of file diff --git a/content/rke/latest/en/config-options/audit-log/_index.md b/content/rke/latest/en/config-options/audit-log/_index.md new file mode 100644 index 00000000000..f92fd76bbe2 --- /dev/null +++ b/content/rke/latest/en/config-options/audit-log/_index.md @@ -0,0 +1,134 @@ +--- +title: Audit Log +weight: 251 +--- + +Kubernetes auditing provides a security-relevant chronological set of records about a cluster. Kube-apiserver performs auditing. Each request on each stage of its execution generates an event, which is then pre-processed according to a certain policy and written to a backend. The policy determines what’s recorded and the backends persist the records. + +You might want to configure the audit log as part of compliance with the CIS (Center for Internet Security) Kubernetes Benchmark controls. + +For configuration details, refer to the [official Kubernetes documentation.](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) + +### Example Configurations + +The audit log can be enabled by default using the following configuration in `cluster.yml`: + +```yaml +services: + kube-api: + audit_log: + enabled: true +``` + +When the audit log is enabled, you should be able to see the default values at `/etc/kubernetes/audit.yaml`: + +```yaml +# Minimum Configuration: Capture event metadata. +... +rules: +- level: Metadata +... +``` + +When the audit log is enabled, default values are also set for the audit log path, maximum age, maximum number of backups, maximum size in megabytes, and format. To see the default values, run: + +``` +ps -ef | grep kube-apiserver +``` + +The default values for the audit log should be displayed: + +```yaml +--audit-log-maxage=5 # The maximum number of days to retain old audit log files +--audit-log-maxbackup=5 # The maximum number of audit log files to retain +--audit-log-path=/var/log/kube-audit/audit-log.json # The log file path that log backend uses to write audit events +--audit-log-maxsize=100 # The maximum size in megabytes of the audit log file before it gets rotated +--audit-policy-file=/etc/kubernetes/audit.yaml # The file containing your audit log rules +--audit-log-format=json # The log file format + +``` + +To customize the audit log, the `configuration` directive is used. + +A rules policy is passed to kube-apiserver using the `--audit-policy-file` or the `policy` directive in the `cluster.yml`. Below is an example `cluster.yml` with custom values and an audit log policy nested under the `configuration` directive. This example audit log policy is taken from the official [Kubernetes documentation:](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy) + +```yaml +services: + kube-api: + audit_log: + enabled: true + configuration: + max_age: 6 + max_backup: 6 + max_size: 110 + path: /var/log/kube-audit/audit-log.json + format: json + policy: + apiVersion: audit.k8s.io/v1 # This is required. + kind: Policy + omitStages: + - "RequestReceived" + rules: + # Log pod changes at RequestResponse level + - level: RequestResponse + resources: + - group: "" + # Resource "pods" doesn't match requests to any subresource of pods, + # which is consistent with the RBAC policy. 
+ resources: ["pods"] + # Log "pods/log", "pods/status" at Metadata level + - level: Metadata + resources: + - group: "" + resources: ["pods/log", "pods/status"] + + # Don't log requests to a configmap called "controller-leader" + - level: None + resources: + - group: "" + resources: ["configmaps"] + resourceNames: ["controller-leader"] + + # Don't log watch requests by the "system:kube-proxy" on endpoints or services + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core API group + resources: ["endpoints", "services"] + + # Don't log authenticated requests to certain non-resource URL paths. + - level: None + userGroups: ["system:authenticated"] + nonResourceURLs: + - "/api*" # Wildcard matching. + - "/version" + + # Log the request body of configmap changes in kube-system. + - level: Request + resources: + - group: "" # core API group + resources: ["configmaps"] + # This rule only applies to resources in the "kube-system" namespace. + # The empty string "" can be used to select non-namespaced resources. + namespaces: ["kube-system"] + + # Log configmap and secret changes in all other namespaces at the Metadata level. + - level: Metadata + resources: + - group: "" # core API group + resources: ["secrets", "configmaps"] + + # Log all other resources in core and extensions at the Request level. + - level: Request + resources: + - group: "" # core API group + - group: "extensions" # Version of group should NOT be included. + + # A catch-all rule to log all other requests at the Metadata level. + - level: Metadata + # Long-running requests like watches that fall under this rule will not + # generate an audit event in RequestReceived. + omitStages: + - "RequestReceived" +``` \ No newline at end of file diff --git a/content/rke/latest/en/config-options/authorization/_index.md b/content/rke/latest/en/config-options/authorization/_index.md index 1bfd1f16084..6d40ca89548 100644 --- a/content/rke/latest/en/config-options/authorization/_index.md +++ b/content/rke/latest/en/config-options/authorization/_index.md @@ -5,7 +5,7 @@ weight: 240 Kubernetes supports multiple [Authorization Modules](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#authorization-modules). Currently, RKE only supports the [RBAC module](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). -By default, RBAC is already enabled. If you wanted to turn off RBAC support, **which isn't recommended**, you set the authorization mode to `none`. +By default, RBAC is already enabled. If you wanted to turn off RBAC support, **which isn't recommended**, you set the authorization mode to `none` in your `cluster.yml`. ```yaml authorization: diff --git a/content/rke/latest/en/config-options/bastion-host/_index.md b/content/rke/latest/en/config-options/bastion-host/_index.md index bade5d19232..3b6848759c6 100644 --- a/content/rke/latest/en/config-options/bastion-host/_index.md +++ b/content/rke/latest/en/config-options/bastion-host/_index.md @@ -3,7 +3,7 @@ title: Bastion/Jump Host Configuration weight: 220 --- -Since RKE uses `ssh` to connect to [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/), you can configure to use a bastion host. Keep in mind that the [port requirements]({{< baseurl >}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. +Since RKE uses `ssh` to connect to [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/), you can configure the `cluster.yml` so RKE will use a bastion host. 
Keep in mind that the [port requirements]({{< baseurl >}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. Our private SSH key(s) only needs to reside on the host running RKE. You do not need to copy your private SSH key(s) to the bastion host. ```yaml bastion_host: diff --git a/content/rke/latest/en/config-options/cloud-providers/aws/_index.md b/content/rke/latest/en/config-options/cloud-providers/aws/_index.md index 278a54b1230..c580991232f 100644 --- a/content/rke/latest/en/config-options/cloud-providers/aws/_index.md +++ b/content/rke/latest/en/config-options/cloud-providers/aws/_index.md @@ -3,7 +3,7 @@ title: AWS Cloud Provider weight: 251 --- -To enable the AWS cloud provider, there are no configuration options. You only need to set the name as `aws`. In order to use the AWS cloud provider, all cluster nodes must have already been configured with an [appropriate IAM role](#iam-requirements) and your AWS resources must be [tagged with a cluster ID](#tagging-amazon-resources). +To enable the AWS cloud provider, there are no RKE configuration options. You only need to set the name as `aws`. In order to use the AWS cloud provider, all cluster nodes must have already been configured with an [appropriate IAM role](#iam-requirements) and your AWS resources must be [tagged with a cluster ID](#tagging-amazon-resources). ```yaml cloud_provider: @@ -12,7 +12,7 @@ cloud_provider: ## IAM Requirements -The nodes used in RKE that will be running the AWS cloud provider must have at least the following IAM policy. +The nodes used in RKE that will be running the AWS cloud provider must have at least the following IAM policy (`rancher-role.json`). ```json { @@ -22,7 +22,7 @@ The nodes used in RKE that will be running the AWS cloud provider must have at l } ``` -In order to use Elastic Load Balancers (ELBs) and EBS with Kubernetes, the node(s) will need to have the an IAM role with appropriate access. +In order to use Elastic Load Balancers (ELBs) and EBS with Kubernetes, the node(s) will need to have the an IAM role with appropriate access (`rancher-policy.json`). ## Example Policy for IAM Role: @@ -54,6 +54,18 @@ In order to use Elastic Load Balancers (ELBs) and EBS with Kubernetes, the node( } ``` +Deploy files to AWS IAM: + +```bash +$ aws iam create-instance-profile --instance-profile-name rancher-node +$ aws iam create-role --role-name rancher-node --assume-role-policy-document file://rancher-role.json +$ aws iam put-role-policy --role-name rancher-node --policy-name rancher-policy --policy-document file://rancher-policy.json +$ aws iam add-role-to-instance-profile --instance-profile rancher-node --role-name rancher-node +``` + +Set `IAM Instance Profile Name` in node template to `rancher-node` + + ## Tagging Amazon Resources Any resources used in a Kubernetes cluster with the Amazon cloud provider must be tagged with a cluster ID. diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md index 9a79826b2df..d73d2169d74 100644 --- a/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md +++ b/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md @@ -3,256 +3,27 @@ title: vSphere Cloud Provider weight: 254 --- +In order to provision Kubernetes clusters in vSphere with the RKE CLI, you must enable the vSphere cloud provider. 
+ +The vSphere cloud provider must also be enabled in order to provision clusters with Rancher, which uses RKE as a library when provisioning [RKE clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) + The [vSphere Cloud Provider](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) interacts with VMware infrastructure (vCenter or standalone ESXi server) to provision and manage storage for persistent volumes in a Kubernetes cluster. -When provisioning Kubernetes using RKE CLI or using [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in Rancher, the vSphere Cloud Provider can be enabled by configuring the `cloud_provider` directive in the cluster YAML file. - -### Prerequisites - -1. You'll need to have credentials of a vCenter/ESXi user account with privileges allowing the cloud provider to interact with the vSphere infrastructure to provision storage. Refer to [this document](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/vcp-roles.html) to create and assign a role with the required permissions in vCenter. -2. VMware Tools must be running in the Guest OS for all nodes in the cluster. -3. All nodes must be configured with disk UUIDs. This is required so that attached VMDKs present a consistent UUID to the VM, allowing the disk to be mounted properly. See [Enabling Disk UUIDs](#enabling-disk-uuids-for-vsphere-vms). - -## Clusters provisioned with RKE CLI - -To enable the vSphere Cloud Provider in the cluster, you must add the top-level `cloud_provider` directive to the cluster configuration file, set the `name` property to `vsphere` and add the `vsphereCloudProvider` directive containing the configuration matching your infrastructure. See the [configuration reference](#configuration-reference) for the gory details. - -## Clusters provisioned with Rancher - -When provisioning clusters in Rancher using the [vSphere node driver]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) or on pre-created [custom nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/) the cluster YAML file must be modified in order to enable the cloud provider. - -1. Log in to the Rancher UI as admin user. -2. Navigate to **Clusters** in the **Global** view. -3. Click **Add Cluster** and select the **vSphere** infrastructure provider. -4. Assign a **Cluster Name**. -5. Assign **Member Roles** as required. -6. Expand **Cluster Options** and configure as required. -7. Set **Cloud Provider** option to `Custom`. - - ![vsphere-node-driver-cloudprovider]({{< baseurl >}}/img/rancher/vsphere-node-driver-cloudprovider.png) - -8. Click on **Edit as YAML** -9. Insert the following top-level structure to the pre-populated cluster YAML. Note that the `name` *must* be set to `vsphere`. Refer to the [configuration reference](#configuration-reference) to learn about the properties of the `vsphereCloudProvider` directive. - - ```yaml - cloud_provider: - name: vsphere - vsphereCloudProvider: - [Insert provider configuration] - ``` - -10. Configure the **Node Pools** per your requirements while ensuring to use a node template that enables disk UUIDs for the VMs (See [Annex - Enable disk UUIDs for vSphere VMs]). -11. Click on **Create** to start provisioning the VMs and Kubernetes services. - -## Configuration Reference - -> **Note:** This documentation reflects the new vSphere Cloud Provider configuration schema introduced in Kubernetes v1.9 which differs from previous versions. 
- -The vSphere configuration options are divided into 5 groups: - -* global -* virtual_center -* workspace -* disk -* network - -### global - -The main purpose of global options is to be able to define a common set of configuration parameters that will be inherited by all vCenters defined under the `virtual_center` directive unless explicitly defined there. - -Accordingly, the `global` directive accepts the same configuration options that are available under the `virtual_center` directive. Additionally it accepts a single parameter that can only be specified here: - -| global Options | Type | Required | Description | -|:---------------:|:-------:|:---------:|:-----------------------------------------------------------------------------:| -| insecure-flag | boolean | | Set to **true** if the vCenter/ESXi uses a self-signed certificate. | - -___ - -**Example:** - -```yaml -(...) - global: - insecure-flag: true -``` - -### virtual_center - -This configuration directive specifies the vCenters that are managing the nodes in the cluster. You must define at least one vCenter/ESXi server. If the nodes span multiple vCenters then all must be defined. - -Each vCenter is defined by adding a new entry under the `virtual_center` directive with the vCenter IP or FQDN as the name. All required parameters must be provided for each vCenter unless they are already defined under the `global` directive. - -| virtual_center Options | Type | Required | Description | -|:----------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------:| -| user | string | * | vCenter/ESXi user used to authenticate with this server. | -| password | string | * | User's password. | -| port | string | | Port to use to connect to this server. Defaults to 443. | -| datacenters | string | * | Comma-separated list of all datacenters in which cluster nodes are running in.| -| soap-roundtrip-count | uint | | Round tripper count for API requests to the vCenter (num retries = value - 1).| - - -> The following additional options (introduced in Kubernetes v1.11) are not yet supported in RKE. - -| virtual_center Options | Type | Required | Description | -|:----------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------:| -| secret-name | string | | Name of secret resource containing credential key/value pairs. Can be specified in lieu of user/password parameters.| -| secret-namespace | string | | Namespace in which the secret resource was created in. | -| ca-file | string | | Path to CA cert file used to verify the vCenter certificate. | - -___ - -**Example:** - -```yaml -(...) - virtual_center: - 172.158.111.1: {} # This vCenter inherits all it's properties from global options - 172.158.110.2: # All required options are set explicitly - user: vc-user - password: othersecret - datacenters: eu-west-2 -``` - -### workspace - -This configuration group specifies how storage for volumes is created in vSphere. -The following configuration options are available: - -| workspace Options | Type | Required | Description | -|:----------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------:| -| server | string | * | IP or FQDN of the vCenter/ESXi that should be used for creating the volumes. Must match one of the vCenters defined under the `virtual_center` directive.| -| datacenter | string | * | Name of the datacenter that should be used for creating volumes. 
For ESXi enter *ha-datacenter*.| -| folder | string | * | Path of folder in which to create dummy VMs used for volume provisioning (relative from the root of the datastore), e.g. "kubernetes".| -| default-datastore | string | | Name of default datastore to place VMDKs if neither datastore or storage policy are specified in the volume options of a PVC. If datastore is located in a storage folder or is a member of a datastore cluster, specify the full path. | -| resourcepool-path | string | | Absolute or relative path to the resource pool where the dummy VMs for [Storage policy based provisioning](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/policy-based-mgmt.html) should be created. If a relative path is specified, it is resolved with respect to the datacenter's *host* folder. Examples: `//host//Resources/`, `Resources/`. For standalone ESXi specify `Resources`.| - -___ - -**Example:** - -```yaml -(...) - workspace: - server: 172.158.111.1 # matches IP of vCenter defined in the virtual_center block - datacenter: eu-west-1 - folder: k8s-dummy - default-datastore: ds-1 -``` - -### disk - -The following configuration options are available under the disk directive: - -| disk Options | Type | Required | Description | -|:--------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------:| -| scsicontrollertype | string | | SCSI controller type to use when attaching block storage to VMs. Must be one of: *lsilogic-sas* or *pvscsi*. Default: *pvscsi*.| - -___ - -### network - -The following configuration options are available under the network directive: - -| network Options | Type | Required | Description | -|:-------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------:| -| public-network | string | | Name of public **VM Network** to which the VMs in the cluster are connected. Used to determine public IP addresses of VMs.| - - -## Configuration Example - -Given the following: - -- VMs in the cluster are running in the same datacenter `eu-west-1` managed by the vCenter `vc.example.com`. -- The vCenter has a user `provisioner` with password `secret` with the required roles assigned, see [Prerequisites](#prerequisites). -- The vCenter has a datastore named `ds-1` which should be used to store the VMDKs for volumes. -- A `k8s-dummy` folder exists in the root of the datastore. - -The corresponding configuration for the provider would then be as follows: - -```yaml -(...) -cloud_provider: - name: vsphere - vsphereCloudProvider: - virtual_center: - vc.example.com: - user: provisioner - password: secret - datacenters: eu-west-1 - workspace: - server: vc.example.com - folder: k8s-dummy - default-datastore: ds-1 - datacenter: eu-west-1 - -``` - -## Annex - -### Enabling disk UUIDs for vSphere VMs - -Depending on whether you are provisioning the VMs using the [vSphere node driver]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere) in Rancher or using your own scripts or third-party tools, there are different methods available to enable disk UUIDs for VMs. - -#### Using the Vsphere Console - -The required property can be set while creating or modifying VMs in the vSphere Console: - -1. For each VM navigate to the tab **VM Options** and click on **Edit Configuration**. -2. Add the parameter `disk.EnableUUID` with a value of **TRUE**. 
- - ![vsphere-advanced-parameters]({{< baseurl >}}/img/rke/vsphere-advanced-parameters.png) - -#### Using the GOVC CLI tool - -You can also modify properties of VMs with the [govc](https://github.com/vmware/govmomi/tree/master/govc) command-line tool to enable disk UUIDs: - -```sh -$ govc vm.change -vm -e disk.enableUUID=TRUE -``` - -#### Using Rancher node template - -When creating new clusters in Rancher using vSphere node templates, you can configure the template to automatically enable disk UUIDs for all VMs created for a cluster: - -1. Navigate to the **Node Templates** in the Rancher UI while logged in as admin user. - -2. Add or edit an existing vSphere node template. - -3. Under **Instance Options** click on **Add Parameter**. - -4. Enter `disk.enableUUID` as key with a value of **TRUE**. - - ![vsphere-nodedriver-enable-uuid]({{< baseurl >}}/img/rke/vsphere-nodedriver-enable-uuid.png) - -5. Click **Create** or **Save**. - -### Troubleshooting - -If you are experiencing issues while provisioning a cluster with enabled vSphere Cloud Provider or while creating vSphere volumes for your workloads, you should inspect the logs of the following K8s services: - -- controller-manager (Manages volumes in vCenter) -- kubelet: (Mounts vSphere volumes to pods) - -If your cluster is not configured with external [Cluster Logging]({{< baseurl >}}/rancher/v2.x/en/tools/logging/), you will need to SSH into nodes to get the logs of the `kube-controller-manager` (running on one of the control plane nodes) and the `kubelet` (pertaining to the node where the stateful pod has been scheduled). - -The easiest way to create a SSH session with a node is the Rancher CLI tool. - -1. [Configure the Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/) for your cluster. -2. Run the following command to get a shell to the corresponding nodes: - - ```sh -$ rancher ssh - ``` - -3. Inspect the logs of the controller-manager and kubelet containers looking for errors related to the vSphere cloud provider: - - ```sh - $ docker logs --since 15m kube-controller-manager - $ docker logs --since 15m kubelet - ``` - +This section describes how to enable the vSphere cloud provider. You will need to use the `cloud_provider` directive in the cluster YAML file. 
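At a glance, the directive looks like the following sketch (the `vsphereCloudProvider` block is a placeholder; the actual values depend on your infrastructure and are covered in the configuration reference):

```yaml
cloud_provider:
  name: vsphere
  vsphereCloudProvider:
    # vCenter/ESXi connection, workspace, disk, and network options go here;
    # see the vSphere configuration reference for all supported fields
```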
### Related Links -- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) -- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) +- **Configuration:** For details on vSphere configuration in RKE, refer to the [configuration reference.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference) +- **Troubleshooting:** For guidance on troubleshooting a cluster with the vSphere cloud provider enabled, refer to the [troubleshooting section.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting) +- **Storage:** If you are setting up storage, see the [official vSphere documentation on storage for Kubernetes,](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) or the [official Kubernetes documentation on persistent volumes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) If you are using Rancher, refer to the [Rancher documentation on provisioning storage in vSphere.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) +- **For Rancher users:** Refer to the Rancher documentation on [creating vSphere Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere) and [provisioning storage.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) + +# Prerequisites + +- **Credentials:** You'll need to have credentials of a vCenter/ESXi user account with privileges allowing the cloud provider to interact with the vSphere infrastructure to provision storage. Refer to [this document](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/vcp-roles.html) to create and assign a role with the required permissions in vCenter. +- **VMware Tools** must be running in the Guest OS for all nodes in the cluster. +- **Disk UUIDs:** All nodes must be configured with disk UUIDs. This is required so that attached VMDKs present a consistent UUID to the VM, allowing the disk to be mounted properly. See the section on [enabling disk UUIDs.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid) + +# Enabling the vSphere Provider with the RKE CLI + +To enable the vSphere Cloud Provider in the cluster, you must add the top-level `cloud_provider` directive to the cluster configuration file, set the `name` property to `vsphere` and add the `vsphereCloudProvider` directive containing the configuration matching your infrastructure. See the [configuration reference]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference) for the gory details. \ No newline at end of file diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/_index.md new file mode 100644 index 00000000000..c0da0176c69 --- /dev/null +++ b/content/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/_index.md @@ -0,0 +1,144 @@ +--- +title: vSphere Configuration Reference +weight: 3 +--- + +This section shows an example of how to configure the vSphere cloud provider. + +The vSphere cloud provider must be enabled to allow dynamic provisioning of volumes. 
+ +For more details on deploying a Kubernetes cluster on vSphere, refer to the [official cloud provider documentation.](https://cloud-provider-vsphere.sigs.k8s.io/tutorials/kubernetes-on-vsphere-with-kubeadm.html) + +> **Note:** This documentation reflects the new vSphere Cloud Provider configuration schema introduced in Kubernetes v1.9 which differs from previous versions. + +# vSphere Configuration Example + +Given the following: + +- VMs in the cluster are running in the same datacenter `eu-west-1` managed by the vCenter `vc.example.com`. +- The vCenter has a user `provisioner` with password `secret` with the required roles assigned, see [Prerequisites](#prerequisites). +- The vCenter has a datastore named `ds-1` which should be used to store the VMDKs for volumes. +- A `vm/kubernetes` folder exists in vCenter. + +The corresponding configuration for the provider would then be as follows: + +```yaml +(...) +cloud_provider: + name: vsphere + vsphereCloudProvider: + virtual_center: + vc.example.com: + user: provisioner + password: secret + port: 443 + datacenters: /us-west-1 + workspace: + server: vc.example.com + folder: /us-west-1/folder/myvmfolder + default-datastore: /us-west-1/datastore/ds-1 + datacenter: /us-west-1 + resourcepool-path: /us-west-1/host/hn1/resources/myresourcepool + +``` +# Configuration Options + +The vSphere configuration options are divided into 5 groups: + +* [global](#global) +* [virtual_center](#virtual_center) +* [workspace](#workspace) +* [disk](#disk) +* [network](#network) + +### global + +The main purpose of global options is to be able to define a common set of configuration parameters that will be inherited by all vCenters defined under the `virtual_center` directive unless explicitly defined there. + +Accordingly, the `global` directive accepts the same configuration options that are available under the `virtual_center` directive. Additionally it accepts a single parameter that can only be specified here: + +| global Options | Type | Required | Description | +|:---------------:|:-------:|:---------:|:---------| +| insecure-flag | boolean | | Set to **true** if the vCenter/ESXi uses a self-signed certificate. | + +**Example:** + +```yaml +(...) + global: + insecure-flag: true +``` + +### virtual_center + +This configuration directive specifies the vCenters that are managing the nodes in the cluster. You must define at least one vCenter/ESXi server. If the nodes span multiple vCenters then all must be defined. + +Each vCenter is defined by adding a new entry under the `virtual_center` directive with the vCenter IP or FQDN as the name. All required parameters must be provided for each vCenter unless they are already defined under the `global` directive. + +| virtual_center Options | Type | Required | Description | +|:----------------------:|:--------:|:---------:|:-----------| +| user | string | * | vCenter/ESXi user used to authenticate with this server. | +| password | string | * | User's password. | +| port | string | | Port to use to connect to this server. Defaults to 443. | +| datacenters | string | * | Comma-separated list of all datacenters in which cluster nodes are running in. | +| soap-roundtrip-count | uint | | Round tripper count for API requests to the vCenter (num retries = value - 1). | + +> The following additional options (introduced in Kubernetes v1.11) are not yet supported in RKE. 
+ +| virtual_center Options | Type | Required | Description | +|:----------------------:|:--------:|:---------:|:-------| +| secret-name | string | | Name of secret resource containing credential key/value pairs. Can be specified in lieu of user/password parameters.| +| secret-namespace | string | | Namespace in which the secret resource was created in. | +| ca-file | string | | Path to CA cert file used to verify the vCenter certificate. | + +**Example:** + +```yaml +(...) + virtual_center: + 172.158.111.1: {} # This vCenter inherits all it's properties from global options + 172.158.110.2: # All required options are set explicitly + user: vc-user + password: othersecret + datacenters: eu-west-2 +``` + +### workspace + +This configuration group specifies how storage for volumes is created in vSphere. +The following configuration options are available: + +| workspace Options | Type | Required | Description | +|:----------------------:|:--------:|:---------:|:---------| +| server | string | * | IP or FQDN of the vCenter/ESXi that should be used for creating the volumes. Must match one of the vCenters defined under the `virtual_center` directive.| +| datacenter | string | * | Name of the datacenter that should be used for creating volumes. For ESXi enter *ha-datacenter*.| +| folder | string | * | Path of folder in which to create dummy VMs used for volume provisioning (relative from the root folder in vCenter), e.g. "vm/kubernetes".| +| default-datastore | string | | Name of default datastore to place VMDKs if neither datastore or storage policy are specified in the volume options of a PVC. If datastore is located in a storage folder or is a member of a datastore cluster, specify the full path. | +| resourcepool-path | string | | Absolute or relative path to the resource pool where the dummy VMs for [Storage policy based provisioning](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/policy-based-mgmt.html) should be created. If a relative path is specified, it is resolved with respect to the datacenter's *host* folder. Examples: `//host//Resources/`, `Resources/`. For standalone ESXi specify `Resources`. | + +**Example:** + +```yaml +(...) + workspace: + server: 172.158.111.1 # matches IP of vCenter defined in the virtual_center block + datacenter: eu-west-1 + folder: vm/kubernetes + default-datastore: ds-1 +``` + +### disk + +The following configuration options are available under the disk directive: + +| disk Options | Type | Required | Description | +|:--------------------:|:--------:|:---------:|:----------------| +| scsicontrollertype | string | | SCSI controller type to use when attaching block storage to VMs. Must be one of: *lsilogic-sas* or *pvscsi*. Default: *pvscsi*. | + +### network + +The following configuration options are available under the network directive: + +| network Options | Type | Required | Description | +|:-------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------| +| public-network | string | | Name of public **VM Network** to which the VMs in the cluster are connected. 
Used to determine public IP addresses of VMs.| \ No newline at end of file diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid/_index.md new file mode 100644 index 00000000000..fb8ae19158d --- /dev/null +++ b/content/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid/_index.md @@ -0,0 +1,35 @@ +--- +title: Enabling Disk UUIDs for vSphere VMs +weight: 2 +--- + +In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. This is required so that attached VMDKs present a consistent UUID to the VM, allowing the disk to be mounted properly. + +Depending on whether you are provisioning the VMs using the [vSphere node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere) in Rancher or using your own scripts or third-party tools, there are different methods available to enable disk UUIDs for VMs: + +- [Using the vSphere console](#using-the-vsphere-console) +- [Using the GOVC CLI tool](#using-the-govc-cli-tool) +- [Using a Rancher node template](#using-a-rancher-node-template) + +### Using the vSphere Console + +The required property can be set while creating or modifying VMs in the vSphere Console: + +1. For each VM navigate to the tab **VM Options** and click on **Edit Configuration**. +2. Add the parameter `disk.EnableUUID` with a value of **TRUE**. + + {{< img "/img/rke/vsphere-advanced-parameters.png" "vsphere-advanced-parameters" >}} + +### Using the GOVC CLI tool + +You can also modify properties of VMs with the [govc](https://github.com/vmware/govmomi/tree/master/govc) command-line tool to enable disk UUIDs: + +```sh +$ govc vm.change -vm -e disk.enableUUID=TRUE +``` + +### Using a Rancher Node Template + +In Rancher v2.0.4+, disk UUIDs are enabled in vSphere node templates by default. + +If you are using Rancher prior to v2.0.4, refer to the [Rancher documentation.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/#enabling-disk-uuids-with-a-node-template) for details on how to enable a UUID with a Rancher node template. diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md new file mode 100644 index 00000000000..6d2cffca67f --- /dev/null +++ b/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md @@ -0,0 +1,27 @@ +--- +title: Troubleshooting vSphere Clusters +weight: 4 +--- + +If you are experiencing issues while provisioning a cluster with enabled vSphere Cloud Provider or while creating vSphere volumes for your workloads, you should inspect the logs of the following K8s services: + +- controller-manager (Manages volumes in vCenter) +- kubelet: (Mounts vSphere volumes to pods) + +If your cluster is not configured with external [Cluster Logging]({{< baseurl >}}/rancher/v2.x/en/tools/logging/), you will need to SSH into nodes to get the logs of the `kube-controller-manager` (running on one of the control plane nodes) and the `kubelet` (pertaining to the node where the stateful pod has been scheduled). + +The easiest way to create a SSH session with a node is the Rancher CLI tool. + +1. [Configure the Rancher CLI]({{< baseurl >}}/rancher/v2.x/en/cli/) for your cluster. +2. Run the following command to get a shell to the corresponding nodes: + + ```sh +$ rancher ssh + ``` + +3. 
Inspect the logs of the controller-manager and kubelet containers looking for errors related to the vSphere cloud provider: + + ```sh + $ docker logs --since 15m kube-controller-manager + $ docker logs --since 15m kubelet + ``` diff --git a/content/rke/latest/en/config-options/nodes/_index.md b/content/rke/latest/en/config-options/nodes/_index.md index 84208496786..75321c4c6b9 100644 --- a/content/rke/latest/en/config-options/nodes/_index.md +++ b/content/rke/latest/en/config-options/nodes/_index.md @@ -5,9 +5,33 @@ weight: 210 The `nodes` directive is the only required section in the `cluster.yml` file. It's used by RKE to specify cluster node(s), ssh credentials used to access the node(s) and which roles these nodes will be in the Kubernetes cluster. +This section covers the following topics: + +- [Node configuration example](#node-configuration-example) +- [Kubernetes roles](#kubernetes-roles) + - [etcd](#etcd) + - [Controlplane](#controlplane) + - [Worker](#worker) +- [Node options](#node-options) + - [Address](#address) + - [Internal address](#internal-address) + - [Overriding the hostname](#overriding-the-hostname) + - [SSH port](#ssh-port) + - [SSH users](#ssh-users) + - [SSH key path](#ssh-key-path) + - [SSH key](#ssh-key) + - [SSH certificate path](#ssh-certificate-path) + - [SSH certificate](#ssh-certificate) + - [Docker socket](#docker-socket) + - [Labels](#labels) + - [Taints](#taints) + +# Node Configuration Example + +The following example shows node configuration in an example `cluster.yml`: + ```yaml nodes: - nodes: - address: 1.1.1.1 user: ubuntu role: @@ -36,6 +60,10 @@ nodes: ssh_key_path: /home/user/.ssh/id_rsa ssh_cert: |- ssh-rsa-cert-v01@openssh.com AAAAHHNza... + taints: # Available as of v0.3.0 + - key: test-key + value: test-value + effect: NoSchedule - address: example.com user: ubuntu role: @@ -46,7 +74,33 @@ nodes: app: ingress ``` -## Node Options +# Kubernetes Roles + +You can specify the list of roles that you want the node to be as part of the Kubernetes cluster. Three roles are supported: `controlplane`, `etcd` and `worker`. Node roles are not mutually exclusive. It's possible to assign any combination of roles to any node. It's also possible to change a node's role using the upgrade process. + +> **Note:** Prior to v0.1.8, workloads/pods might have run on any nodes with `worker` or `controlplane` roles, but as of v0.1.8, they will only be deployed to any `worker` nodes. + +### etcd + +With this role, the `etcd` container will be run on these nodes. Etcd keeps the state of your cluster and is the most important component in your cluster, single source of truth of your cluster. Although you can run etcd on just one node, it typically takes 3, 5 or more nodes to create an HA configuration. Etcd is a distributed reliable key-value store which stores all Kubernetes state. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **etcd** role is shown below: + +Taint Key | Taint Value | Taint Effect +---------------------------------------|--------------|-------------- +`node-role.kubernetes.io/etcd` | `true` | `NoExecute` + +### Controlplane + +With this role, the stateless components that are used to deploy Kubernetes will run on these nodes. These components are used to run the API server, scheduler, and controllers. 
[Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **controlplane** role is shown below: + +Taint Key | Taint Value | Taint Effect +---------------------------------------|--------------|-------------- +`node-role.kubernetes.io/controlplane` | `true` | `NoSchedule` + +### Worker + +With this role, any workloads or pods that are deployed will land on these nodes. + +# Node Options Within each node, there are multiple directives that can be used. @@ -90,32 +144,6 @@ For each node, you can specify the path, i.e. `ssh_cert_path`, for the signed SS Instead of setting the path to the signed SSH certificate, you can alternatively specify the actual certificate, i.e. `ssh_cert`, to be used to connect to the node. -### Kubernetes Roles - -You can specify the list of roles that you want the node to be as part of the Kubernetes cluster. Three roles are supported: `controlplane`, `etcd` and `worker`. Node roles are not mutually exclusive. It's possible to assign any combination of roles to any node. It's also possible to change a node's role using the upgrade process. - -> **Note:** Prior to v0.1.8, workloads/pods might have run on any nodes with `worker` or `controlplane` roles, but as of v0.1.8, they will only be deployed to any `worker` nodes. - -* **etcd** - -With this role, the `etcd` container will be run on these nodes. Etcd keeps the state of your cluster and is the most important component in your cluster, single source of truth of your cluster. Although you can run etcd on just one node, it typically takes 3, 5 or more nodes to create an HA configuration. Etcd is a distributed reliable key-value store which stores all Kubernetes state. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **etcd** role is shown below: - -Taint Key | Taint Value | Taint Effect ----------------------------------------|--------------|-------------- -`node-role.kubernetes.io/etcd` | `true` | `NoExecute` - -* **controlplane** - -With this role, the stateless components that are used to deploy Kubernetes will run on these nodes. These components are used to run the API server, scheduler, and controllers. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **controlplane** role is shown below: - -Taint Key | Taint Value | Taint Effect ----------------------------------------|--------------|-------------- -`node-role.kubernetes.io/controlplane` | `true` | `NoSchedule` - -* **worker** - -With this role, any workloads or pods that are deployed will land on these nodes. - ### Docker Socket If the Docker socket is different than the default, you can set the `docker_socket`. The default is `/var/run/docker.sock` @@ -123,3 +151,9 @@ If the Docker socket is different than the default, you can set the `docker_sock ### Labels You have the ability to add an arbitrary map of labels for each node. It can be used when using the [ingress controller's]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) `node_selector` option. + +### Taints + +_Available as of v0.3.0_ + +You have the ability to add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) for each node. 
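For example, a short sketch of a node entry with a taint (the key, value, and effect shown are illustrative, mirroring the node configuration example above):

```yaml
nodes:
- address: 1.1.1.1
  user: ubuntu
  role:
  - worker
  taints: # Available as of v0.3.0
  - key: test-key
    value: test-value
    effect: NoSchedule
```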
diff --git a/content/rke/latest/en/config-options/private-registries/_index.md b/content/rke/latest/en/config-options/private-registries/_index.md index b835a3480a2..5a5c1a4d18e 100644 --- a/content/rke/latest/en/config-options/private-registries/_index.md +++ b/content/rke/latest/en/config-options/private-registries/_index.md @@ -3,7 +3,7 @@ title: Private Registries weight: 215 --- -RKE supports the ability to configure multiple private Docker registries. By passing in your registry and credentials, it allows the nodes to pull images from these private registries. +RKE supports the ability to configure multiple private Docker registries in the `cluster.yml`. By passing in your registry and credentials, it allows the nodes to pull images from these private registries. ```yaml private_registries: diff --git a/content/rke/latest/en/config-options/rate-limiting/_index.md b/content/rke/latest/en/config-options/rate-limiting/_index.md new file mode 100644 index 00000000000..2e942415a72 --- /dev/null +++ b/content/rke/latest/en/config-options/rate-limiting/_index.md @@ -0,0 +1,54 @@ +--- +title: Rate Limiting +weight: 241 +--- + +Using the `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time period. In a large multi-tenant cluster, there might be a small percentage of tenants that flood the server with event requests, which could have a significant impact on the performance of the cluster overall. Therefore, it is recommended to limit the rate of events that the API server will accept. + +You might want to configure event rate limit as part of compliance with the CIS (Center for Internet Security) Kubernetes Benchmark. Event rate limiting corresponds to the CIS Kubernetes Benchmark 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored). + +Rate limits can be configured for the server, a namespace, a user, or a combination of a source and an object. + +For configuration details, refer to the [official Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit) + +### Example Configurations + +The following configuration in the `cluster.yml` can be used to enable the event rate limit by default: + +```yaml +services: + kube-api: + event_rate_limit: + enabled: true +``` + +When the event rate limit is enabled, you should be able to see the default values at `/etc/kubernetes/admission.yaml`: + +```yaml +... +plugins: +- configuration: + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - burst: 20000 + qps: 5000 + type: Server +... 
+``` + +To customize the event rate limit, the entire Kubernetes resource for the configuration must be provided in the `configuration` directive: + +```yaml +services: + kube-api: + event_rate_limit: + enabled: true + configuration: + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 6000 + burst: 30000 +``` \ No newline at end of file diff --git a/content/rke/latest/en/config-options/secrets-encryption/_index.md b/content/rke/latest/en/config-options/secrets-encryption/_index.md new file mode 100644 index 00000000000..c4dc378a249 --- /dev/null +++ b/content/rke/latest/en/config-options/secrets-encryption/_index.md @@ -0,0 +1,174 @@ +--- +title: Encrypting Secret Data at Rest +weight: 230 +--- + +As of version `v0.3.1` RKE adds the support for managing secret data encryption at rest, which is [supported by Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#before-you-begin) since version `v1.13`. + +At-rest data encryption is required for: + +- Compliance requirements +- Additional layer of security +- Reduce security impact of etcd node compromise +- Reduce security impact of etcd backups compromise +- Ability to use external Key Management Systems + +RKE provides users with two paths of configuration to enable at-rest data encryption: + +- Managed at-rest data encryption +- Custom configuration for at-rest data encryption + +Both configuration options can be added during initial cluster provisioning or by updating an existing cluster. + +To utilize this feature, a new field `secrets_encryption_config` is added to the [Kubernetes API service configuration]({{}}//rke/latest/en/config-options/services/#kubernetes-api-server). A full custom configuration looks like this: + +```yaml +services: + kube-api: + secrets_encryption_config: + enabled: true + custom_config: + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: k-fw5hn + secret: RTczRjFDODMwQzAyMDVBREU4NDJBMUZFNDhCNzM5N0I= + - identity: {} + +``` +# Managed At-Rest Data Encryption + +Enabling and disabling at-rest data encryption in Kubernetes is a relatively complex process that requires several steps to be performed by the Kubernetes cluster administrator. The managed configuration aims to reduce this overhead and provides a simple abstraction layer to manage the process. + +### Enable Encryption +Managed at-rest data encryption is disabled by default and can be enabled by using the following configuration: + +```yaml +services: + kube-api: + secrets_encryption_config: + enabled: true +``` +Once enabled, RKE will perform the following [actions](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#encrypting-your-data) to enable at-rest data encryption: + +- Generate a new random 32-bit encryption key +- Generate an encryption provider configuration file using the new key The default [provider](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers) used is `aescbc` +- Deploy the provider configuration file to all nodes with `controlplane` role +- Update the `kube-apiserver` container arguments to point to the provider configuration file. +- Restart the `kube-apiserver` container. + +After the `kube-api server` is restarted, data encryption is enabled. However, all existing secrets are still stored in plain text. 
RKE will [rewrite](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted) all secrets to ensure encryption is fully in effect. + +### Disable Encryption +To disable encryption, you can either set the `enabled` flag to `false`, or simply remove the `secrets_encryption_config` block entirely from `cluster.yml`. + +```yaml +services: + kube-api: + secrets_encryption_config: + enabled: false +``` + +Once encryption is disabled in `cluster.yml`, RKE will perform the following [actions](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#encrypting-your-data) to disable encryption in your cluster: + +- Generate a new provider configuration file with the no-encryption `identity{}` provider as the first provider and the previous `aescbc` provider as the second provider. This will allow Kubernetes to use the first entry to write the secrets, and the second one to decrypt them. +- Deploy the new provider configuration and restart `kube-apiserver`. +- Rewrite all secrets. This is required because, at this point, new data will be written to disk in plain text, but the existing data is still encrypted using the old provider. By rewriting all secrets, RKE ensures that all stored data is decrypted. +- Update `kube-apiserver` arguments to remove the encryption provider configuration and restart the `kube-apiserver`. +- Remove the provider configuration file. + + +# Key Rotation +Sometimes there is a need to rotate the encryption keys in your cluster, for example, when a key is compromised. There are two ways to rotate the keys: with an RKE CLI command, or by disabling and re-enabling encryption in `cluster.yml`. + +### Rotating Keys with the RKE CLI + +With managed configuration, the RKE CLI can perform the key rotation process documented [here](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#rotating-a-decryption-key) with one command. To perform this operation, the following subcommand is used: +```bash +$ ./rke encrypt rotate-key --help +NAME: + rke encrypt rotate-key - Rotate cluster encryption provider key + +USAGE: + rke encrypt rotate-key [command options] [arguments...] + +OPTIONS: + --config value Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] + --ssh-agent-auth Use SSH Agent Auth defined by SSH_AUTH_SOCK + --ignore-docker-version Disable Docker version check + +``` +This command will perform the following actions: + +- Generate a new random 32-byte encryption key +- Generate a new provider configuration with the new key as the first provider and the old key as the second provider. When the secrets are rewritten, the first key will be used to encrypt the data on the write operation, while the second key (the old key) will be used to decrypt the stored data during the read operation +- Deploy the new provider configuration to all `controlplane` nodes and restart the `kube-apiserver` +- Rewrite all secrets. This process will re-encrypt all the secrets with the new key. +- Update the configuration to remove the old key and restart the `kube-apiserver` + +### Rotating Keys by Disabling and Re-enabling Encryption in cluster.yml + +For a cluster with encryption enabled, you can rotate the encryption keys by updating `cluster.yml`. If you disable and re-enable the data encryption in the `cluster.yml`, RKE will not reuse old keys. Instead, it will generate new keys every time, yielding the same result as a key rotation with the RKE CLI.
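+
+For example, rotating keys this way is a two-pass update of `cluster.yml`. The following is a rough sketch of that sequence, assuming the usual workflow of editing `cluster.yml` and applying it with `rke up`; it is not an additional RKE command:
+
+```
+# 1. In cluster.yml, set services.kube-api.secrets_encryption_config.enabled to false, then apply:
+$ rke up --config cluster.yml
+
+# 2. In cluster.yml, set enabled back to true, then apply again; RKE generates a fresh key:
+$ rke up --config cluster.yml
+```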
+ +# Custom At-Rest Data Encryption Configuration +With managed configuration, RKE provides the user with a very simple way to enable and disable encryption with minimal interaction and configuration. However, it doesn't allow for any customization of the configuration. + +With custom encryption configuration, RKE allows the user to provide their own configuration. Although RKE will help the user to deploy the configuration and rewrite the secrets if needed, it doesn't validate the configuration on the user's behalf. It's the user's responsibility to make sure their configuration is valid. + +>**Warning:** Using an invalid encryption provider configuration could cause several issues with your cluster, ranging from crashing the Kubernetes API service, `kube-api`, to completely losing access to encrypted data. + +### Example: Using Custom Encryption Configuration with Amazon KMS + +An example of a custom configuration would be enabling an external key management system like [Amazon KMS](https://aws.amazon.com/kms/). The following is an example of the configuration for AWS KMS: + +```yaml + +services: + kube-api: + extra_binds: + - "/var/run/kmsplugin/:/var/run/kmsplugin/" + secrets_encryption_config: + enabled: true + custom_config: + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - kms: + name: aws-encryption-provider + endpoint: unix:///var/run/kmsplugin/socket.sock + cachesize: 1000 + timeout: 3s + - identity: {} +``` + +Documentation for AWS KMS can be found [here](https://github.com/kubernetes-sigs/aws-encryption-provider). When the custom configuration is set to enable the AWS KMS provider, you should consider the following points: + +- Since RKE runs the `kube-api` service in a container, it's required that you use the `extra_binds` feature to bind-mount the KMS provider socket location inside the `kube-api` container. +- The AWS KMS provider runs as a pod in the cluster. Therefore, the proper way to enable it is to: + 1. Deploy your cluster with at-rest encryption disabled. + 2. Deploy the KMS pod and make sure it's working correctly. + 3. Update your cluster with the custom encryption configuration to utilize the KMS provider. +- Kube API connects to the KMS provider using a Unix socket. You should configure your KMS deployment to run pods on all `controlplane` nodes in the cluster. +- Your `controlplane` nodes should be configured with an IAM instance profile that has access to the KMS key you used in your configuration. + +### How to Prevent Restore Failures after Rotating Keys +It's important to understand that enabling encryption for your cluster means that you can no longer access encrypted data in your etcd database and/or etcd database backups without using your encryption keys. + +The encryption configuration is stored in the cluster state file `cluster.rkestate`, which is decoupled from the etcd backups. For example, in any of the following backup cases, the restore process will fail: + +- The snapshot is taken while encryption is enabled and restored when it's disabled. In this case, the encryption keys are no longer stored in the cluster state. +- The snapshot is taken before the keys are rotated and restore is attempted after. In this case, the old keys used for encryption at the time of the snapshot no longer exist in the cluster state file.
+ +Therefore, we recommend that when you enable or disable encryption, or when you rotate keys, you [create a snapshot]({{}}/rke/latest/en/etcd-snapshots/one-time-snapshots/) so that the keys needed to restore your backup match the keys you currently have access to. + +This also means you should not rotate the keys during the restore process, because you would lose the encryption keys in `cluster.rkestate`. + +The same applies to the custom configuration use case; however, in that case it will depend on the user-provided encryption configuration. diff --git a/content/rke/latest/en/config-options/services/_index.md b/content/rke/latest/en/config-options/services/_index.md index 6fb743534f6..cfd88cc39a9 100644 --- a/content/rke/latest/en/config-options/services/_index.md +++ b/content/rke/latest/en/config-options/services/_index.md @@ -1,5 +1,6 @@ --- -title: Kubernetes Default Services +title: Default Kubernetes Services +description: To deploy Kubernetes, RKE deploys several default Kubernetes services. Read about etcd, kube-api server, kubelet, kube-proxy and more weight: 230 --- @@ -7,13 +8,22 @@ To deploy Kubernetes, RKE deploys several core components or services in Docker **All services support additional [custom arguments, Docker mount binds and extra environment variables]({{< baseurl >}}/rke/latest/en/config-options/services/services-extras/).** +| Component | Services key name in cluster.yml | +|-------------------------|----------------------------------| +| etcd | `etcd` | +| kube-apiserver | `kube-api` | +| kube-controller-manager | `kube-controller` | +| kubelet | `kubelet` | +| kube-scheduler | `scheduler` | +| kube-proxy | `kubeproxy` | + ## etcd -Kubernetes uses [etcd](https://github.com/coreos/etcd/blob/master/Documentation/docs.md) as a store for cluster state and data. Etcd is a reliable, consistent and distributed key-value store. +Kubernetes uses [etcd](https://etcd.io/) as a store for cluster state and data. Etcd is a reliable, consistent and distributed key-value store. RKE supports running etcd in a single node mode or in HA cluster mode. It also supports adding and removing etcd nodes to the cluster. -You can enable etcd to [take recurring snapshots]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#recurring-snapshots). These snapshots can be used to [restore etcd]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). +You can enable etcd to [take recurring snapshots]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#recurring-snapshots). These snapshots can be used to [restore etcd]({{< baseurl >}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). By default, RKE will deploy a new etcd service, but you can also run Kubernetes with an [external etcd service]({{< baseurl >}}/rke/latest/en/config-options/services/external-etcd/). @@ -35,6 +45,8 @@ services: # Enable AlwaysPullImages Admission controller plugin # Available as of v0.2.0 always_pull_images: false + secrets_encryption_config: + enabled: true ``` ### Kubernetes API Server Options @@ -46,7 +58,7 @@ RKE supports the following options for the `kube-api` service : - **Pod Security Policy** (`pod_security_policy`) - An option to enable the [Kubernetes Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). By default, we do not enable pod security policies as it is set to `false`. > **Note:** If you set `pod_security_policy` value to `true`, RKE will configure an open policy to allow any pods to work on the cluster. You will need to configure your own policies to fully utilize PSP.
- **Always Pull Images** (`always_pull_images`) - Enable `AlwaysPullImages` Admission controller plugin. Enabling `AlwaysPullImages` is a security best practice. It forces Kubernetes to validate the image and pull credentials with the remote image registry. Local image layer cache will still be used, but it does add a small bit of overhead when launching containers to pull and compare image hashes. _Note: Available as of v0.2.0_ - +- **Secrets Encryption Config** (`secrets_encryption_config`) - Manage Kubernetes at-rest data encryption, documented [here]({{< baseurl >}}/rke/latest/en/config-options/secrets-encryption). ## Kubernetes Controller Manager > **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) when creating [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. @@ -83,6 +95,8 @@ services: cluster_dns_server: 10.43.0.10 # Fail if swap is on fail_swap_on: false + # Generate per node serving certificate + generate_serving_certificate: false ``` ### Kubelet Options @@ -92,6 +106,15 @@ RKE supports the following options for the `kubelet` service: - **Cluster Domain** (`cluster_domain`) - The [base domain](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) for the cluster. All services and DNS records created on the cluster. By default, the domain is set to `cluster.local`. - **Cluster DNS Server** (`cluster_dns_server`) - The IP address assigned to the DNS service endpoint within the cluster. DNS queries will be sent to this IP address which is used by KubeDNS. The default value for this option is `10.43.0.10` - **Fail if Swap is On** (`fail_swap_on`) - In Kubernetes, the default behavior for the kubelet is to **fail** if swap is enabled on the node. RKE does **not** follow this default and allows deployments on nodes with swap enabled. By default, the value is `false`. If you'd like to revert to the default kubelet behavior, set this option to `true`. +- **Generate Serving Certificate** (`generate_serving_certificate`) - Generate a certificate signed by the `kube-ca` Certificate Authority for the kubelet to use as a server certificate. The default value for this option is `false`. Before enabling this option, please read [the requirements](#kubelet-serving-certificate-requirements). + +### Kubelet Serving Certificate Requirements + +If `hostname_override` is configured for one or more nodes in `cluster.yml`, please make sure the correct IP address is configured in `address` (and the internal address in `internal_address`) so that the generated certificate contains the correct IP address(es). + +An example of an error situation is an EC2 instance where the public IP address is configured in `address` and `hostname_override` is used: the connection between `kube-apiserver` and the `kubelet` will fail because the `kubelet` will be contacted on the private IP address, and the generated certificate will not be valid (the error `x509: certificate is valid for value_in_address, not private_ip` will be seen). The resolution is to provide the internal IP address in `internal_address`.
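+
+For example, the following is a minimal sketch of a node entry for that situation; the addresses and SSH user are placeholders:
+
+```yaml
+nodes:
+  - address: 52.50.10.20          # public IP configured in `address`
+    internal_address: 10.0.0.5    # private IP, so the generated certificate is valid for it
+    hostname_override: node1
+    user: ubuntu
+    role:
+      - controlplane
+      - worker
+```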
+ +For more information on host overrides, refer to the [node configuration page.]({{}}/rke/latest/en/config-options/nodes/#overriding-the-hostname) ## Kubernetes Scheduler diff --git a/content/rke/latest/en/config-options/services/services-extras/_index.md b/content/rke/latest/en/config-options/services/services-extras/_index.md index 867d6a98fa2..57f623800ab 100644 --- a/content/rke/latest/en/config-options/services/services-extras/_index.md +++ b/content/rke/latest/en/config-options/services/services-extras/_index.md @@ -9,10 +9,16 @@ RKE supports additional service arguments, volume binds and environment variable For any of the Kubernetes services, you can update the `extra_args` to change the existing defaults. -As of `v0.1.3`, using `extra_args` will add new arguments and **override** any existing defaults. For example, if you need to modify the default admission controllers list, you need to include the default list and edit it with your changes so all changes are included. +As of `v0.1.3`, using `extra_args` will add new arguments and **override** any existing defaults. For example, if you need to modify the default admission plugins list, you need to include the default list and edit it with your changes so all changes are included. Prior to `v0.1.3`, using `extra_args` would only add new arguments to the list and there was no ability to change the default list. +All service defaults and parameters are defined per [`kubernetes_version`]({{}}/rke/latest/en/config-options/#kubernetes-version): + +- For RKE v0.3.0+, the service defaults and parameters are defined per [`kubernetes_version`]({{}}/rke/latest/en/config-options/#kubernetes-version). The service defaults are located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go). The default list of admissions plugins is the same for all Kubernetes versions and is located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go#L11). + +- For RKE prior to v0.3.0, the service defaults and admission plugins are defined per [`kubernetes_version`]({{}}/rke/latest/en/config-options/#kubernetes-version) and located [here](https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go). + ```yaml services: kube-controller: diff --git a/content/rke/latest/en/config-options/system-images/_index.md b/content/rke/latest/en/config-options/system-images/_index.md index 3fbcaff40e0..ae16387c7cc 100644 --- a/content/rke/latest/en/config-options/system-images/_index.md +++ b/content/rke/latest/en/config-options/system-images/_index.md @@ -6,9 +6,13 @@ When RKE is deploying Kubernetes, there are several images that are pulled. Thes As of `v0.1.6`, the functionality of a couple of the system images were consolidated into a single `rancher/rke-tools` image to simplify and speed the deployment process. -You can configure the [network plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/), [ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [dns provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) as well as the options for these add-ons separately. 
+You can configure the [network plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/), [ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [dns provider]({{}}/rke/latest/en/config-options/add-ons/dns/) as well as the options for these add-ons separately in the `cluster.yml`. -This is the example of the full list of system images used to deploy Kubernetes through RKE. The image tags are dependent on the [Kubernetes image/version used](https://github.com/rancher/types/blob/master/apis/management.cattle.io/v3/k8s_defaults.go). +Below is an example of the list of system images used to deploy Kubernetes through RKE. The default versions of Kubernetes are tied to specific versions of system images. + +- For RKE v0.2.x and below, the map of versions and the system image versions is located here: https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go + +- For RKE v0.3.0 and above, the map of versions and the system image versions is located here: https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_rke_system_images.go > **Note:** As versions of RKE are released, the tags on these images will no longer be up to date. This list is specific for `v1.10.3-rancher2`. diff --git a/content/rke/latest/en/etcd-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/_index.md index efe05c2fb39..d973feb3d2f 100644 --- a/content/rke/latest/en/etcd-snapshots/_index.md +++ b/content/rke/latest/en/etcd-snapshots/_index.md @@ -11,295 +11,22 @@ RKE clusters can be configured to automatically take snapshots of etcd. In a dis _Available as of v0.2.0_ -RKE can also upload your snapshots to a S3 compatible backend. Additionally, the **pki.bundle.tar.gz** file usage is no longer required as v0.2.0 has changed how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). +RKE can upload your snapshots to a S3 compatible backend. -## One-Time Snapshots +**Note:** As of RKE v0.2.0, the `pki.bundle.tar.gz` file is no longer required because of a change in how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). -The `rke etcd snapshot-save` command will save a snapshot of etcd from each etcd node in the cluster config file. The snapshot is saved in `/opt/rke/etcd-snapshots`. When running the command, an additional container is created to take the snapshot. When the snapshot is completed, the container is automatically removed. +# Backing Up a Cluster -Prior to v0.2.0, along with the individual snapshot, RKE saves a backup of the certificates, i.e. a file named `pki.bundle.tar.gz`, in the same location. The snapshot and pki bundle file are required for the restore process in versions prior to v0.2.0. +You can create [one-time snapshots]({{}}/rke/latest/en/etcd-snapshots/one-time-snapshots) to back up your cluster, and you can also configure [recurring snapshots]({{}}/rke/latest/en/etcd-snapshots/recurring-snapshots). -As of v0.2.0, the one-time snapshot can be uploaded to a S3 compatible backend by using the additional options to specify the S3 backend. +# Restoring a Cluster from Backup -### Options for `rke etcd snapshot-save` +You can use RKE to [restore your cluster from backup]({{}}/rke/latest/en/etcd-snapshots/restoring-from-backup). 
-| Option | Description | S3 Specific | -| --- | --- | --- | -| `--name` value | Specify snapshot name | | -| `--config` value | Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] | | -| `--s3` | Enabled backup to s3 | * | -| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | -| `--access-key` value | Specify s3 accessKey | * | -| `--secret-key` value | Specify s3 secretKey | * | -| `--bucket-name` value | Specify s3 bucket name | * | -| `--region` value | Specify the s3 bucket location (optional) | * | -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | +# Example Scenarios -### IAM Support for Storing Snapshots in S3 -In addition to API access keys, RKE supports using IAM roles for S3 authentication. The cluster etcd nodes must be assigned an IAM role that has read/write access to the designated backup bucket on S3. Also, the nodes must have network access to the S3 endpoint specified. - - To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -### Local One-Time Snapshot Example - -``` -$ rke etcd snapshot-save --config cluster.yml --name snapshot-name -``` - -The snapshot is saved in `/opt/rke/etcd-snapshots` - -### One-Time Snapshots uploaded to S3 Example - -_Available as of v0.2.0_ - -``` -$ rke etcd snapshot-save --config cluster.yml --name snapshot-name \ ---s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com -``` - -The snapshot is saved in `/opt/rke/etcd-snapshots` as well as uploaded to the S3 backend. - -## Recurring Snapshots - -To schedule automatic recurring etcd snapshots, you can enable the `etcd-snapshot` service with [extra configuration options the etcd service](#options-for-the-etcd-snapshot-service). `etcd-snapshot` runs in a service container alongside the `etcd` container. By default, the `etcd-snapshot` service takes a snapshot for every node that has the `etcd` role and stores them to local disk in `/opt/rke/etcd-snapshots`. If you set up the [options for S3](#options-for-the-etcd-snapshot-service), the snapshot will also be uploaded to the S3 backend. - -Prior to v0.2.0, along with the snapshots, RKE saves a backup of the certificates, i.e. a file named `pki.bundle.tar.gz`, in the same location. The snapshot and pki bundle file are required for the restore process in versions prior to v0.2.0. - -When a cluster is launched with the `etcd-snapshot` service enabled, you can view the `etcd-rolling-snapshots` logs to confirm backups are being created automatically. 
- -``` -$ docker logs etcd-rolling-snapshots - -time="2018-05-04T18:39:16Z" level=info msg="Initializing Rolling Backups" creation=1m0s retention=24h0m0s -time="2018-05-04T18:40:16Z" level=info msg="Created backup" name="2018-05-04T18:40:16Z_etcd" runtime=108.332814ms -time="2018-05-04T18:41:16Z" level=info msg="Created backup" name="2018-05-04T18:41:16Z_etcd" runtime=92.880112ms -time="2018-05-04T18:42:16Z" level=info msg="Created backup" name="2018-05-04T18:42:16Z_etcd" runtime=83.67642ms -time="2018-05-04T18:43:16Z" level=info msg="Created backup" name="2018-05-04T18:43:16Z_etcd" runtime=86.298499ms -``` - -### Options for the `Etcd-Snapshot` Service - -Depending on your version of RKE, the options used to configure recurring snapshots may be different. - -_Available as of v0.2.0_ - -|Option|Description| S3 Specific | -|---|---| --- | -|**interval_hours**| The duration in hours between recurring backups. This supercedes the `creation` option and will override it if both are specified.| | -|**retention**| The number of snapshots to retain before rotation. This supercedes the `retention` option and will override it if both are specified.| | -|**bucket_name**| S3 bucket name where backups will be stored| * | -|**access_key**| S3 access key with permission to access the backup bucket.| * | -|**secret_key** |S3 secret key with permission to access the backup bucket.| * | -|**region** |S3 region for the backup bucket. This is optional.| * | -|**endpoint** |S3 regions endpoint for the backup bucket.| * | - -
- - -```yaml -services: - etcd: - backup_config: - interval_hours: 12 - retention: 6 - s3backupconfig: - access_key: S3_ACCESS_KEY - secret_key: S3_SECRET_KEY - bucket_name: s3-bucket-name - region: "" - endpoint: s3.amazonaws.com -``` - -#### Prior to v0.2.0 - -|Option|Description| -|---|---| -|**Snapshot**|By default, the recurring snapshot service is disabled. To enable the service, you need to define it as part of `etcd` and set it to `true`.| -|**Creation**|By default, the snapshot service will take snapshots every 5 minutes (`5m0s`). You can change the time between snapshots as part of the `creation` directive for the `etcd` service.| -|**Retention**|By default, all snapshots are saved for 24 hours (`24h`) before being deleted and purged. You can change how long to store a snapshot as part of the `retention` directive for the `etcd` service.| - -```yaml -services: - etcd: - snapshot: true - creation: 5m0s - retention: 24h -``` - -## Etcd Disaster Recovery - -If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot. RKE also removes the old `etcd` container before creating a new `etcd` cluster using the snapshot that you have chosen. - ->**Warning:** Restoring an etcd snapshot deletes your current etcd cluster and replaces it with a new one. Before you run the `rke etcd snapshot-restore` command, you should back up any important data in your cluster. - -The snapshot used to restore your etcd cluster can either be stored locally in `/opt/rke/etcd-snapshots` or from a S3 compatible backend. The S3 backend option is available as of v0.2.0. - -### Options for `rke etcd snapshot-restore` - -| Option | Description | S3 Specific | -| --- | --- | ---| -| `--name` value | Specify snapshot name | | -| `--config` value | Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] | | -| `--s3` | Enabled backup to s3 |* | -| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | -| `--access-key` value | Specify s3 accessKey | *| -| `--secret-key` value | Specify s3 secretKey | *| -| `--bucket-name` value | Specify s3 bucket name | *| -| `--region` value | Specify the s3 bucket location (optional) | *| -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | - -### Example of Restoring from a Local Snapshot - -When restoring etcd from a local snapshot, the snapshot is assumed to be located in `/opt/rke/etcd-snapshots`. In versions prior to v0.2.0, the `pki.bundle.tar.gz` file is also expected to be in the same location. As of v0.2.0, this file is no longer needed as v0.2.0 has changed how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). - -``` -$ rke etcd snapshot-restore --config cluster.yml --name mysnapshot -``` - -### Example of Restoring from a Snapshot in S3 - -_Available as of v0.2.0_ - -> **Note:** Ensure your `cluster.rkestate` is present before starting the restore, as this contains your certificate data for the cluster - -When restoring etcd from a snapshot located in S3, the command needs the S3 information in order to connect to the S3 backend and retrieve the snapshot. 
- -```shell -$ rke etcd snapshot-restore --config cluster.yml --name snapshot-name \ ---s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com -``` -> **Note:** if you were restoring a cluster that had rancher installed the UI should start-up after a few minutes; you don't need to re-run helm. - -### Example Scenario of restoring from a Local Snapshot - -In this example, the Kubernetes cluster was deployed on two AWS nodes. - -| Name | IP | Role | -|:-----:|:--------:|:----------------------:| -| node1 | 10.0.0.1 | [controlplane, worker] | -| node2 | 10.0.0.2 | [etcd] | - -### Back up the `etcd` cluster - -Take a local snapshot of the Kubernetes cluster. As of v0.2.0, you can also upload this snapshot directly to a S3 backend with the [S3 options](#options-for-rke-etcd-snapshot-save). - -``` -$ rke etcd snapshot-save --name snapshot.db --config cluster.yml -``` - -![etcd snapshot]({{< baseurl >}}/img/rke/rke-etcd-backup.png) - - -### Store the Snapshot Externally in S3 - -As of v0.2.0, this step is no longer required, as RKE can upload and download snapshots automatically from S3 by adding in [S3 options](#options-for-rke-etcd-snapshot-save) when running the `rke etcd snapshot-save` command. - -After taking the etcd snapshot on `node2`, we recommend saving this backup in a persistence place. One of the options is to save the backup and `pki.bundle.tar.gz` file on a S3 bucket or tape backup. - -> **Note:** As of v0.2.0, the file **pki.bundle.tar.gz** is no longer required for the restore process. - -``` -# If you're using an AWS host and have the ability to connect to S3 -root@node2:~# s3cmd mb s3://rke-etcd-backup -root@node2:~# s3cmd /opt/rke/etcd-snapshots/snapshot.db /opt/rke/etcd-snapshots/pki.bundle.tar.gz s3://rke-etcd-backup/ -``` - -### Place the backup on a new node - -To simulate the failure, let's power down `node2`. - -``` -root@node2:~# poweroff -``` - -| Name | IP | Role | -|:-----:|:--------:|:----------------------:| -| node1 | 10.0.0.1 | [controlplane, worker] | -| ~~node2~~ | ~~10.0.0.2~~ | ~~[etcd]~~ | -| node3 | 10.0.0.3 | [etcd] | -| | | | - - -Before restoring etcd and running `rke up`, we need to retrieve the backup saved on S3 to a new node, e.g. `node3`. As of v0.2.0, you can directly retrieve the snapshot from S3 when running the restore command, so this step is for users who stored the snapshot externally without using the integrated S3 options. - -``` -# Make a Directory -root@node3:~# mkdir -p /opt/rke/etcdbackup -# Get the Backup from S3 -root@node3:~# s3cmd get s3://rke-etcd-backup/snapshot.db /opt/rke/etcd-snapshots/snapshot.db -# Get the pki bundle from S3, only needed prior to v0.2.0 -root@node3:~# s3cmd get s3://rke-etcd-backup/pki.bundle.tar.gz /opt/rke/etcd-snapshots/pki.bundle.tar.gz -``` - -### Restore `etcd` on the new node from the backup - -Before updating and restoring etcd, you will need to add the new node into the Kubernetes cluster with the `etcd` role. In the `cluster.yml`, comment out the old node and add in the new node. ` - -```yaml -nodes: - - address: 10.0.0.1 - hostname_override: node1 - user: ubuntu - role: - - controlplane - - worker -# - address: 10.0.0.2 -# hostname_override: node2 -# user: ubuntu -# role: -# - etcd - - address: 10.0.0.3 - hostname_override: node3 - user: ubuntu - role: - - etcd -``` - -After the new node is added to the `cluster.yml`, run `rke etcd snapshot-restore` to launch `etcd` from the backup. 
The snapshot and `pki.bundle.tar.gz` file are expected to be saved at `/opt/rke/etcd-snapshots`. -As of v0.2.0, if you want to directly retrieve the snapshot from S3, add in the [S3 options](#options-for-rke-etcd-snapshot-restore). - -> **Note:** As of v0.2.0, the file **pki.bundle.tar.gz** is no longer required for the restore process as the certificates required to restore are preserved within the `cluster.rkestate` - -``` -$ rke etcd snapshot-restore --name snapshot.db --config cluster.yml -``` - -Finally, we need to restore the operations on the cluster by making the Kubernetes API point to the new `etcd` by running `rke up` again using the new `cluster.yml`. - -``` -$ rke up --config cluster.yml -``` - -Confirm that your Kubernetes cluster is functional by checking the pods on your cluster. - -``` -> kubectl get pods -NAME READY STATUS RESTARTS AGE -nginx-65899c769f-kcdpr 1/1 Running 0 17s -nginx-65899c769f-pc45c 1/1 Running 0 17s -nginx-65899c769f-qkhml 1/1 Running 0 17s -``` +These [example scenarios]({{}}/rke/latest/en/etcd-snapshots/example-scenarios) for backup and restore are different based on your version of RKE. ## Troubleshooting -As of **v0.1.9**, the **rke-bundle-cert** container is removed on both success and failure of a restore. To debug any issues, you will need to look at the **logs** generated from rke. - -As of **v0.1.8** and below, the **rke-bundle-cert** container is left over from a failed etcd restore. If you are having an issue with restoring an **etcd snapshot** then you can do the following on each etcd nodes before attempting to do another restore: - -``` -docker container rm --force rke-bundle-cert -``` - -The rke-bundle-cert container is usually removed when a backup or restore of **etcd** succeeds. Whenever something goes wrong, the **rke-bundle-cert** container will be left over. You can look -at the logs or inspect the container to see what the issue is. - -``` -docker container logs --follow rke-bundle-cert -docker container inspect rke-bundle-cert -``` - -The important thing to note is the mounts of the container and location of the **pki.bundle.tar.gz**. +If you have trouble restoring your cluster, you can refer to the [troubleshooting]({{}}/rke/latest/en/etcd-snapshots/troubleshooting) page. diff --git a/content/rke/latest/en/etcd-snapshots/example-scenarios/_index.md b/content/rke/latest/en/etcd-snapshots/example-scenarios/_index.md new file mode 100644 index 00000000000..e57fd3d5871 --- /dev/null +++ b/content/rke/latest/en/etcd-snapshots/example-scenarios/_index.md @@ -0,0 +1,253 @@ +--- +title: Example Scenarios +weight: 4 +--- + +These example scenarios for backup and restore are different based on your version of RKE. + +{{% tabs %}} +{{% tab "RKE v0.2.0+" %}} + +This walkthrough will demonstrate how to restore an etcd cluster from a local snapshot with the following steps: + +1. [Back up the cluster](#1-back-up-the-cluster) +1. [Simulate a node failure](#2-simulate-a-node-failure) +1. [Add a new etcd node to the cluster](#3-add-a-new-etcd-node-to-the-kubernetes-cluster) +1. [Restore etcd on the new node from the backup](#4-restore-etcd-on-the-new-node-from-the-backup) +1. [Confirm that cluster operations are restored](#5-confirm-that-cluster-operations-are-restored) + +In this example, the Kubernetes cluster was deployed on two AWS nodes. + +| Name | IP | Role | +|:-----:|:--------:|:----------------------:| +| node1 | 10.0.0.1 | [controlplane, worker] | +| node2 | 10.0.0.2 | [etcd] | + + +### 1. 
Back Up the Cluster + +Take a local snapshot of the Kubernetes cluster. + +You can upload this snapshot directly to an S3 backend with the [S3 options]({{}}/rke/latest/en/etcd-snapshots/one-time-snapshots/#options-for-rke-etcd-snapshot-save). + +``` +$ rke etcd snapshot-save --name snapshot.db --config cluster.yml +``` + +{{< img "/img/rke/rke-etcd-backup.png" "etcd snapshot" >}} + +### 2. Simulate a Node Failure + +To simulate the failure, let's power down `node2`. + +``` +root@node2:~# poweroff +``` + +| Name | IP | Role | +|:-----:|:--------:|:----------------------:| +| node1 | 10.0.0.1 | [controlplane, worker] | +| ~~node2~~ | ~~10.0.0.2~~ | ~~[etcd]~~ | + +### 3. Add a New etcd Node to the Kubernetes Cluster + +Before updating and restoring etcd, you will need to add the new node into the Kubernetes cluster with the `etcd` role. In the `cluster.yml`, comment out the old node and add in the new node. + +```yaml +nodes: + - address: 10.0.0.1 + hostname_override: node1 + user: ubuntu + role: + - controlplane + - worker +# - address: 10.0.0.2 +# hostname_override: node2 +# user: ubuntu +# role: +# - etcd + - address: 10.0.0.3 + hostname_override: node3 + user: ubuntu + role: + - etcd +``` + +### 4. Restore etcd on the New Node from the Backup + +> **Prerequisite:** Ensure your `cluster.rkestate` is present before starting the restore, because this contains your certificate data for the cluster. + +After the new node is added to the `cluster.yml`, run the `rke etcd snapshot-restore` to launch `etcd` from the backup: + +``` +$ rke etcd snapshot-restore --name snapshot.db --config cluster.yml +``` + +The snapshot is expected to be saved at `/opt/rke/etcd-snapshots`. + +If you want to directly retrieve the snapshot from S3, add in the [S3 options](#options-for-rke-etcd-snapshot-restore). + +> **Note:** As of v0.2.0, the file `pki.bundle.tar.gz` is no longer required for the restore process because the certificates required to restore are preserved within the `cluster.rkestate`. + +### 5. Confirm that Cluster Operations are Restored + +The `rke etcd snapshot-restore` command triggers `rke up` using the new `cluster.yml`. Confirm that your Kubernetes cluster is functional by checking the pods on your cluster. + +``` +> kubectl get pods +NAME READY STATUS RESTARTS AGE +nginx-65899c769f-kcdpr 1/1 Running 0 17s +nginx-65899c769f-pc45c 1/1 Running 0 17s +nginx-65899c769f-qkhml 1/1 Running 0 17s +``` + +{{% /tab %}} +{{% tab "RKE prior to v0.2.0" %}} + +This walkthrough will demonstrate how to restore an etcd cluster from a local snapshot with the following steps: + +1. [Take a local snapshot of the cluster](#take-a-local-snapshot-of-the-cluster-rke-prior-to-v0.2.0) +1. [Store the snapshot externally](#store-the-snapshot-externally-rke-prior-to-v0.2.0) +1. [Simulate a node failure](#simulate-a-node-failure-rke-prior-to-v0.2.0) +1. [Remove the Kubernetes cluster and clean the nodes](#remove-the-kubernetes-cluster-and-clean-the-nodes-rke-prior-to-v0.2.0) +1. [Retrieve the backup and place it on a new node](#retrieve-the-backup-and-place-it-on-a-new-node-rke-prior-to-v0.2.0) +1. [Add a new etcd node to the Kubernetes cluster](#add-a-new-etcd-node-to-the-kubernetes-cluster-rke-prior-to-v0.2.0) +1. [Restore etcd on the new node from the backup](#restore-etcd-on-the-new-node-from-the-backup-rke-prior-to-v0.2.0) +1. 
[Restore Operations on the Cluster](#restore-operations-on-the-cluster-rke-prior-to-v0.2.0) + +### Example Scenario of Restoring from a Local Snapshot + +In this example, the Kubernetes cluster was deployed on two AWS nodes. + +| Name | IP | Role | +|:-----:|:--------:|:----------------------:| +| node1 | 10.0.0.1 | [controlplane, worker] | +| node2 | 10.0.0.2 | [etcd] | + + +### 1. Take a Local Snapshot of the Cluster + +Back up the Kubernetes cluster by taking a local snapshot: + +``` +$ rke etcd snapshot-save --name snapshot.db --config cluster.yml +``` + +{{< img "/img/rke/rke-etcd-backup.png" "etcd snapshot" >}} + + +### 2. Store the Snapshot Externally + +After taking the etcd snapshot on `node2`, we recommend saving this backup in a persistent place. One of the options is to save the backup and `pki.bundle.tar.gz` file on an S3 bucket or tape backup. + +``` +# If you're using an AWS host and have the ability to connect to S3 +root@node2:~# s3cmd mb s3://rke-etcd-backup +root@node2:~# s3cmd put \ + /opt/rke/etcd-snapshots/snapshot.db \ + /opt/rke/etcd-snapshots/pki.bundle.tar.gz \ + s3://rke-etcd-backup/ +``` + + +### 3. Simulate a Node Failure + +To simulate the failure, let's power down `node2`. + +``` +root@node2:~# poweroff +``` + +| Name | IP | Role | +|:-----:|:--------:|:----------------------:| +| node1 | 10.0.0.1 | [controlplane, worker] | +| ~~node2~~ | ~~10.0.0.2~~ | ~~[etcd]~~ | + + +### 4. Remove the Kubernetes Cluster and Clean the Nodes + +The following command removes your cluster and cleans the nodes so that the cluster can be restored without any conflicts: + +``` +rke remove --config cluster.yml +``` + + +### 5. Retrieve the Backup and Place It on a New Node + +Before restoring etcd and running `rke up`, we need to retrieve the backup saved on S3 to a new node, e.g. `node3`. + +``` +# Make a Directory +root@node3:~# mkdir -p /opt/rke/etcd-snapshots + +# Get the Backup from S3 +root@node3:~# s3cmd get \ + s3://rke-etcd-backup/snapshot.db \ + /opt/rke/etcd-snapshots/snapshot.db + +# Get the pki bundle from S3 +root@node3:~# s3cmd get \ + s3://rke-etcd-backup/pki.bundle.tar.gz \ + /opt/rke/etcd-snapshots/pki.bundle.tar.gz +``` + +> **Note:** If you had multiple etcd nodes, you would have to manually sync the snapshot and `pki.bundle.tar.gz` across all of the etcd nodes in the cluster. + + +### 6. Add a New etcd Node to the Kubernetes Cluster + +Before updating and restoring etcd, you will need to add the new node into the Kubernetes cluster with the `etcd` role. In the `cluster.yml`, comment out the old node and add in the new node. + +```yaml +nodes: + - address: 10.0.0.1 + hostname_override: node1 + user: ubuntu + role: + - controlplane + - worker +# - address: 10.0.0.2 +# hostname_override: node2 +# user: ubuntu +# role: +# - etcd + - address: 10.0.0.3 + hostname_override: node3 + user: ubuntu + role: + - etcd +``` + + +### 7. Restore etcd on the New Node from the Backup + +After the new node is added to the `cluster.yml`, run the `rke etcd snapshot-restore` command to launch `etcd` from the backup: + +``` +$ rke etcd snapshot-restore --name snapshot.db --config cluster.yml +``` + +The snapshot and `pki.bundle.tar.gz` file are expected to be saved at `/opt/rke/etcd-snapshots` on each etcd node. + + +### 8. Restore Operations on the Cluster + +Finally, we need to restore the operations on the cluster. We will make the Kubernetes API point to the new `etcd` by running `rke up` again using the new `cluster.yml`.
+ +``` +$ rke up --config cluster.yml +``` + +Confirm that your Kubernetes cluster is functional by checking the pods on your cluster. + +``` +> kubectl get pods +NAME READY STATUS RESTARTS AGE +nginx-65899c769f-kcdpr 1/1 Running 0 17s +nginx-65899c769f-pc45c 1/1 Running 0 17s +nginx-65899c769f-qkhml 1/1 Running 0 17s +``` + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md new file mode 100644 index 00000000000..b98f7e4ed42 --- /dev/null +++ b/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md @@ -0,0 +1,123 @@ +--- +title: One-time Snapshots +weight: 1 +--- + +One-time snapshots are handled differently depending on your version of RKE. + +{{% tabs %}} +{{% tab "RKE v0.2.0+" %}} + +To save a snapshot of etcd from each etcd node defined in the cluster config file, run the `rke etcd snapshot-save` command. + +The snapshot is saved in `/opt/rke/etcd-snapshots`. + +When running the command, an additional container is created to take the snapshot. When the snapshot is completed, the container is automatically removed. + +The one-time snapshot can be uploaded to a S3 compatible backend by using the additional options to specify the S3 backend. + +To create a local one-time snapshot, run: + +``` +$ rke etcd snapshot-save --config cluster.yml --name snapshot-name +``` + +**Result:** The snapshot is saved in `/opt/rke/etcd-snapshots`. + +To save a one-time snapshot to S3, run: + +``` +$ rke etcd snapshot-save \ +--config cluster.yml \ +--name snapshot-name \ +--s3 \ +--access-key S3_ACCESS_KEY \ +--secret-key S3_SECRET_KEY \ +--bucket-name s3-bucket-name \ +--folder s3-folder-name \ +--s3-endpoint s3.amazonaws.com +``` + +The `--folder` flag is optional and is available as of v0.3.0. + +**Result:** The snapshot is saved in `/opt/rke/etcd-snapshots` as well as uploaded to the S3 backend. + +### Options for `rke etcd snapshot-save` + +| Option | Description | S3 Specific | +| --- | --- | --- | +| `--name` value | Specify snapshot name | | +| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | | +| `--s3` | Enabled backup to s3 | * | +| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | +| `--s3-endpoint-ca` value | Specify a path to a CA cert file to connect to a custom s3 endpoint (optional) _Available as of v0.2.5_ | * | +| `--access-key` value | Specify s3 accessKey | * | +| `--secret-key` value | Specify s3 secretKey | * | +| `--bucket-name` value | Specify s3 bucket name | * | +| `--folder` value | Specify folder inside bucket where backup will be stored. This is optional. _Available as of v0.3.0_ | * | +| `--region` value | Specify the s3 bucket location (optional) | * | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | | + +The `--access-key` and `--secret-key` options are not required if the `etcd` nodes are AWS EC2 instances that have been configured with a suitable IAM instance profile. + +##### Using a custom CA certificate for S3 + +_Available as of v0.2.5_ + +The backup snapshot can be stored on a custom `S3` backend like [minio](https://min.io/). If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the `--s3-endpoint-ca` option to connect to the S3 backend.
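+
+For instance, a one-time snapshot saved to a self-hosted, S3-compatible backend might look like the following sketch; the endpoint, bucket, credentials, and CA path are placeholders:
+
+```
+$ rke etcd snapshot-save \
+--config cluster.yml \
+--name snapshot-name \
+--s3 \
+--access-key S3_ACCESS_KEY \
+--secret-key S3_SECRET_KEY \
+--bucket-name s3-bucket-name \
+--s3-endpoint minio.example.com \
+--s3-endpoint-ca /path/to/custom-ca.pem
+```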
+ +### IAM Support for Storing Snapshots in S3 + +In addition to API access keys, RKE supports using IAM roles for S3 authentication. The cluster etcd nodes must be assigned an IAM role that has read/write access to the designated backup bucket on S3. Also, the nodes must have network access to the S3 endpoint specified. + +Below is an [example IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) that would allow nodes to store and retrieve backups from S3: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": ["s3:ListBucket"], + "Resource": ["arn:aws:s3:::bucket-name"] + }, + { + "Sid": "AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": ["arn:aws:s3:::bucket-name/*"] + } + ] +} +``` + +For details on giving an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +{{% /tab %}} +{{% tab "RKE prior to v0.2.0" %}} + +To save a snapshot of etcd from each etcd node in the cluster config file, run the `rke etcd snapshot-save` command. + +When running the command, an additional container is created to take the snapshot. When the snapshot is completed, the container is automatically removed. + +RKE saves a backup of the certificates, i.e. a file named `pki.bundle.tar.gz`, in the same location. The snapshot and pki bundle file are required for the restore process. + +To create a local one-time snapshot, run: + +``` +$ rke etcd snapshot-save --config cluster.yml --name snapshot-name +``` + +**Result:** The snapshot is saved in `/opt/rke/etcd-snapshots`. + +### Options for `rke etcd snapshot-save` + +| Option | Description | +| --- | --- | +| `--name` value | Specify snapshot name | +| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | +| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md new file mode 100644 index 00000000000..a5435ddfbe1 --- /dev/null +++ b/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md @@ -0,0 +1,134 @@ +--- +title: Recurring Snapshots +weight: 2 +--- + +Recurring snapshots are handled differently based on your version of RKE. + +{{% tabs %}} +{{% tab "RKE v0.2.0+"%}} + +To schedule automatic recurring etcd snapshots, you can enable the `etcd-snapshot` service with [extra configuration options](#options-for-the-etcd-snapshot-service). `etcd-snapshot` runs in a service container alongside the `etcd` container. By default, the `etcd-snapshot` service takes a snapshot for every node that has the `etcd` role and stores them to local disk in `/opt/rke/etcd-snapshots`. + +If you set up the [options for S3](#options-for-the-etcd-snapshot-service), the snapshot will also be uploaded to the S3 backend. + +### Snapshot Service Logging + +When a cluster is launched with the `etcd-snapshot` service enabled, you can view the `etcd-rolling-snapshots` logs to confirm backups are being created automatically. 
+ +``` +$ docker logs etcd-rolling-snapshots + +time="2018-05-04T18:39:16Z" level=info msg="Initializing Rolling Backups" creation=1m0s retention=24h0m0s +time="2018-05-04T18:40:16Z" level=info msg="Created backup" name="2018-05-04T18:40:16Z_etcd" runtime=108.332814ms +time="2018-05-04T18:41:16Z" level=info msg="Created backup" name="2018-05-04T18:41:16Z_etcd" runtime=92.880112ms +time="2018-05-04T18:42:16Z" level=info msg="Created backup" name="2018-05-04T18:42:16Z_etcd" runtime=83.67642ms +time="2018-05-04T18:43:16Z" level=info msg="Created backup" name="2018-05-04T18:43:16Z_etcd" runtime=86.298499ms +``` + +### Options for the `Etcd-Snapshot` Service + +|Option|Description| S3 Specific | +|---|---| --- | +|**interval_hours**| The duration in hours between recurring backups. This supersedes the `creation` option (which was used in RKE prior to v0.2.0) and will override it if both are specified.| | +|**retention**| The number of snapshots to retain before rotation. This supersedes the `retention` option (which was used in RKE prior to v0.2.0) and will override it if both are specified.| | +|**bucket_name**| S3 bucket name where backups will be stored| * | +|**folder**| Folder inside S3 bucket where backups will be stored. This is optional. _Available as of v0.3.0_ | * | +|**access_key**| S3 access key with permission to access the backup bucket.| * | +|**secret_key** |S3 secret key with permission to access the backup bucket.| * | +|**region** |S3 region for the backup bucket. This is optional.| * | +|**endpoint** |S3 region endpoint for the backup bucket.| * | +|**custom_ca** |Custom certificate authority to use when connecting to the endpoint. Only required for private S3 compatible storage solutions. Available for RKE v0.2.5+.| * | + +The `access_key` and `secret_key` options are not required if the `etcd` nodes are AWS EC2 instances that have been configured with a suitable IAM instance profile. + +##### Using a custom CA certificate for S3 + +The backup snapshot can be stored on a custom `S3` backend like [minio](https://min.io/). If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the option `custom_ca` to connect to the S3 backend. + +### IAM Support for Storing Snapshots in S3 + +In addition to API access keys, RKE supports using IAM roles for S3 authentication. The cluster etcd nodes must be assigned an IAM role that has read/write access to the designated backup bucket on S3. Also, the nodes must have network access to the S3 endpoint specified.
+ +Below is an [example IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) that would allow nodes to store and retrieve backups from S3: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": ["s3:ListBucket"], + "Resource": ["arn:aws:s3:::bucket-name"] + }, + { + "Sid": "AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": ["arn:aws:s3:::bucket-name/*"] + } + ] +} +``` + +For details on giving an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +### Configuring the Snapshot Service in YAML + +```yaml +services: + etcd: + backup_config: + interval_hours: 12 + retention: 6 + s3backupconfig: + access_key: S3_ACCESS_KEY + secret_key: S3_SECRET_KEY + bucket_name: s3-bucket-name + region: "" + folder: "" # Optional - Available as of v0.3.0 + endpoint: s3.amazonaws.com +``` + +{{% /tab %}} +{{% tab "RKE prior to v0.2.0"%}} + +To schedule automatic recurring etcd snapshots, you can enable the `etcd-snapshot` service with [extra configuration options](#options-for-the-local-etcd-snapshot-service). `etcd-snapshot` runs in a service container alongside the `etcd` container. By default, the `etcd-snapshot` service takes a snapshot for every node that has the `etcd` role and stores them to local disk in `/opt/rke/etcd-snapshots`. + +RKE saves a backup of the certificates, i.e. a file named `pki.bundle.tar.gz`, in the same location. The snapshot and pki bundle file are required for the restore process in versions prior to v0.2.0. + +### Snapshot Service Logging + +When a cluster is launched with the `etcd-snapshot` service enabled, you can view the `etcd-rolling-snapshots` logs to confirm backups are being created automatically. + +``` +$ docker logs etcd-rolling-snapshots + +time="2018-05-04T18:39:16Z" level=info msg="Initializing Rolling Backups" creation=1m0s retention=24h0m0s +time="2018-05-04T18:40:16Z" level=info msg="Created backup" name="2018-05-04T18:40:16Z_etcd" runtime=108.332814ms +time="2018-05-04T18:41:16Z" level=info msg="Created backup" name="2018-05-04T18:41:16Z_etcd" runtime=92.880112ms +time="2018-05-04T18:42:16Z" level=info msg="Created backup" name="2018-05-04T18:42:16Z_etcd" runtime=83.67642ms +time="2018-05-04T18:43:16Z" level=info msg="Created backup" name="2018-05-04T18:43:16Z_etcd" runtime=86.298499ms +``` + +### Options for the Local `Etcd-Snapshot` Service + +|Option|Description| +|---|---| +|**Snapshot**|By default, the recurring snapshot service is disabled. To enable the service, you need to define it as part of `etcd` and set it to `true`.| +|**Creation**|By default, the snapshot service will take snapshots every 5 minutes (`5m0s`). You can change the time between snapshots as part of the `creation` directive for the `etcd` service.| +|**Retention**|By default, all snapshots are saved for 24 hours (`24h`) before being deleted and purged. 
You can change how long to store a snapshot as part of the `retention` directive for the `etcd` service.| + +### Configuring the Snapshot Service in YAML + +```yaml +services: + etcd: + snapshot: true + creation: 5m0s + retention: 24h +``` + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md new file mode 100644 index 00000000000..a4e0ce38419 --- /dev/null +++ b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md @@ -0,0 +1,116 @@ +--- +title: Restoring from Backup +weight: 3 +--- + +The details of restoring your cluster from backup are different depending on your version of RKE. + +{{% tabs %}} +{{% tab "RKE v0.2.0+"%}} + +If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot and should be run on an etcd node of the specific cluster that has suffered the disaster. + +The following actions will be performed when you run the command: + +- Syncs the snapshot or downloads the snapshot from S3, if necessary. +- Checks snapshot checksum across etcd nodes to make sure they are identical. +- Deletes your current cluster and cleans old data by running `rke remove`. This removes the entire Kubernetes cluster, not just the etcd cluster. +- Rebuilds the etcd cluster from the chosen snapshot. +- Creates a new cluster by running `rke up`. +- Restarts cluster system pods. + +>**Warning:** You should back up any important data in your cluster before running `rke etcd snapshot-restore` because the command deletes your current Kubernetes cluster and replaces it with a new one. + +The snapshot used to restore your etcd cluster can either be stored locally in `/opt/rke/etcd-snapshots` or retrieved from a S3 compatible backend. + +### Example of Restoring from a Local Snapshot + +To restore etcd from a local snapshot, run: + +``` +$ rke etcd snapshot-restore --config cluster.yml --name mysnapshot +``` + +The snapshot is assumed to be located in `/opt/rke/etcd-snapshots`. + +**Note:** The `pki.bundle.tar.gz` file is not needed because RKE v0.2.0 changed how the [Kubernetes cluster state is stored]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state). + +### Example of Restoring from a Snapshot in S3 + +> **Prerequisite:** Ensure your `cluster.rkestate` is present before starting the restore, because this contains your certificate data for the cluster. + +When restoring etcd from a snapshot located in S3, the command needs the S3 information in order to connect to the S3 backend and retrieve the snapshot. + +```shell +$ rke etcd snapshot-restore \ +--config cluster.yml \ +--name snapshot-name \ +--s3 \ +--access-key S3_ACCESS_KEY \ +--secret-key S3_SECRET_KEY \ +--bucket-name s3-bucket-name \ +--folder s3-folder-name \ +--s3-endpoint s3.amazonaws.com +``` + +The `--folder` flag is optional and is available as of v0.3.0. + +**Note:** If you are restoring a cluster that had Rancher installed, the Rancher UI should start up after a few minutes; you don't need to re-run Helm.
+ +### Options for `rke etcd snapshot-restore` + +| Option | Description | S3 Specific | +| --- | --- | ---| +| `--name` value | Specify snapshot name | | +| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | | +| `--s3` | Enable backup to S3 | * | +| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | +| `--access-key` value | Specify s3 accessKey | *| +| `--secret-key` value | Specify s3 secretKey | *| +| `--bucket-name` value | Specify s3 bucket name | *| +| `--folder` value | Specify folder inside bucket where backup will be stored. This is optional. _Available as of v0.3.0_ | *| +| `--region` value | Specify the s3 bucket location (optional) | *| +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | | + +{{% /tab %}} +{{% tab "RKE prior to v0.2.0"%}} + +If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot and should be run on an etcd node of the specific cluster that has suffered the disaster. + +The following actions will be performed when you run the command: + +- Removes the old etcd cluster +- Rebuilds the etcd cluster using the local snapshot + +Before you run this command, you must: + +- Run `rke remove` to remove your Kubernetes cluster and clean the nodes +- Download your etcd snapshot from S3, if applicable. Place the etcd snapshot and the `pki.bundle.tar.gz` file in `/opt/rke/etcd-snapshots`. Manually sync the snapshot across all `etcd` nodes. + +After the restore, you must rebuild your Kubernetes cluster with `rke up`. + +>**Warning:** You should back up any important data in your cluster before running `rke etcd snapshot-restore` because the command deletes your current etcd cluster and replaces it with a new one. + +### Example of Restoring from a Local Snapshot + +To restore etcd from a local snapshot, run: + +``` +$ rke etcd snapshot-restore --config cluster.yml --name mysnapshot +``` + +The snapshot is assumed to be located in `/opt/rke/etcd-snapshots`. + +The snapshot must be manually synced across all `etcd` nodes. + +The `pki.bundle.tar.gz` file is also expected to be in the same location. + +### Options for `rke etcd snapshot-restore` + +| Option | Description | +| --- | --- | +| `--name` value | Specify snapshot name | +| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{< baseurl >}}/rke/latest/en/config-options/#ssh-agent) | +| `--ignore-docker-version` | [Disable Docker version check]({{< baseurl >}}/rke/latest/en/config-options/#supported-docker-versions) | + +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/troubleshooting/_index.md b/content/rke/latest/en/etcd-snapshots/troubleshooting/_index.md new file mode 100644 index 00000000000..372142f649a --- /dev/null +++ b/content/rke/latest/en/etcd-snapshots/troubleshooting/_index.md @@ -0,0 +1,22 @@ +--- +title: Troubleshooting +weight: 5 +--- + +As of **v0.1.9**, the **rke-bundle-cert** container is removed on both success and failure of a restore. To debug any issues, you will need to look at the **logs** generated from rke.
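
If the logs from a normal run are not detailed enough, one option is to re-run the failing operation with verbose output enabled and keep a copy for inspection. This is only a sketch, assuming the global `--debug` flag of your RKE version; the snapshot name and log file path are placeholders.

```
# Sketch: capture verbose RKE output while re-running a restore
$ rke --debug etcd snapshot-restore --config cluster.yml --name mysnapshot 2>&1 | tee rke-restore.log
```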
+ +As of **v0.1.8** and below, the **rke-bundle-cert** container is left over from a failed etcd restore. If you are having an issue with restoring an **etcd snapshot** then you can do the following on each etcd nodes before attempting to do another restore: + +``` +docker container rm --force rke-bundle-cert +``` + +The rke-bundle-cert container is usually removed when a backup or restore of **etcd** succeeds. Whenever something goes wrong, the **rke-bundle-cert** container will be left over. You can look +at the logs or inspect the container to see what the issue is. + +``` +docker container logs --follow rke-bundle-cert +docker container inspect rke-bundle-cert +``` + +The important thing to note is the mounts of the container and location of the `pki.bundle.tar.gz`. diff --git a/content/rke/latest/en/example-yamls/_index.md b/content/rke/latest/en/example-yamls/_index.md index 0b9db0fbc29..9b155eecca8 100644 --- a/content/rke/latest/en/example-yamls/_index.md +++ b/content/rke/latest/en/example-yamls/_index.md @@ -49,7 +49,8 @@ nodes: labels: app: ingress -# If set to true, RKE will not fail when unsupported Docker version are found +# If set to true, RKE will not fail when unsupported Docker version +# are found ignore_docker_version: false # Cluster level SSH private key @@ -57,15 +58,20 @@ ignore_docker_version: false ssh_key_path: ~/.ssh/test # Enable use of SSH agent to use SSH private keys with passphrase -# This requires the environment `SSH_AUTH_SOCK` configured pointing to your SSH agent which has the private key added +# This requires the environment `SSH_AUTH_SOCK` configured pointing +#to your SSH agent which has the private key added ssh_agent_auth: true # List of registry credentials -# If you are using a Docker Hub registry, you can omit the `url` or set it to `docker.io` +# If you are using a Docker Hub registry, you can omit the `url` +# or set it to `docker.io` +# is_default set to `true` will override the system default +# registry set in the global settings private_registries: - - url: registry.com - user: Username - password: password + - url: registry.com + user: Username + password: password + is_default: true # Bastion/Jump host configuration bastion_host: @@ -83,12 +89,34 @@ bastion_host: cluster_name: mycluster -# The kubernetes version used. For now, this should match the version defined in rancher/types defaults map: https://github.com/rancher/types/blob/master/apis/management.cattle.io/v3/k8s_defaults.go#L14 -# In case the kubernetes_version and kubernetes image in system_images are defined, the system_images configuration will take precedence over kubernetes_version. +# The Kubernetes version used. The default versions of Kubernetes +# are tied to specific versions of the system images. +# +# For RKE v0.2.x and below, the map of Kubernetes versions and their system images is +# located here: +# https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go +# +# For RKE v0.3.0 and above, the map of Kubernetes versions and their system images is +# located here: +# https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_rke_system_images.go +# +# In case the kubernetes_version and kubernetes image in +# system_images are defined, the system_images configuration +# will take precedence over kubernetes_version. 
kubernetes_version: v1.10.3-rancher2 -# System Image Tags are defaulted to a tag tied with specific kubernetes Versions -# Default Tags: https://github.com/rancher/types/blob/master/apis/management.cattle.io/v3/k8s_defaults.go) +# System Images are defaulted to a tag that is mapped to a specific +# Kubernetes Version and not required in a cluster.yml. +# Each individual system image can be specified if you want to use a different tag. +# +# For RKE v0.2.x and below, the map of Kubernetes versions and their system images is +# located here: +# https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go +# +# For RKE v0.3.0 and above, the map of Kubernetes versions and their system images is +# located here: +# https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_rke_system_images.go +# system_images: kubernetes: rancher/hyperkube:v1.10.3-rancher2 etcd: rancher/coreos-etcd:v3.1.12 @@ -120,7 +148,10 @@ services: # -----BEGIN PRIVATE KEY----- # xxxxxxxxxx # -----END PRIVATE KEY----- - # Note for Rancher 2 users: If you are configuring Cluster Options using a Config File when creating Rancher Launched Kubernetes, the names of services should contain underscores only: `kube_api`. This only applies to Rancher v2.0.5 and v2.0.6. + # Note for Rancher v2.0.5 and v2.0.6 users: If you are configuring + # Cluster Options using a Config File when creating Rancher Launched + # Kubernetes, the names of services should contain underscores + # only: `kube_api`. kube-api: # IP range for any services created on Kubernetes # This must match the service_cluster_ip_range in kube-controller @@ -137,7 +168,10 @@ services: delete-collection-workers: 3 # Set the level of log output to debug-level v: 4 - # Note for Rancher 2 users: If you are configuring Cluster Options using a Config File when creating Rancher Launched Kubernetes, the names of services should contain underscores only: `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. + # Note for Rancher 2 users: If you are configuring Cluster Options + # using a Config File when creating Rancher Launched Kubernetes, + # the names of services should contain underscores only: + # `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. kube-controller: # CIDR pool used to assign IP addresses to pods in the cluster cluster_cidr: 10.42.0.0/16 @@ -159,9 +193,10 @@ services: - "/usr/libexec/kubernetes/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins" # Currently, only authentication strategy supported is x509. -# You can optionally create additional SANs (hostnames or IPs) to add to -# the API server PKI certificate. -# This is useful if you want to use a load balancer for the control plane servers. +# You can optionally create additional SANs (hostnames or IPs) to +# add to the API server PKI certificate. +# This is useful if you want to use a load balancer for the +# control plane servers. authentication: strategy: x509 sans: @@ -174,14 +209,16 @@ authentication: authorization: mode: rbac -# If you want to set a Kubernetes cloud provider, you specify the name and configuration +# If you want to set a Kubernetes cloud provider, you specify +# the name and configuration cloud_provider: name: aws -# Add-ons are deployed using kubernetes jobs. RKE will give up on trying to get the job status after this timeout in seconds.. +# Add-ons are deployed using kubernetes jobs. RKE will give +# up on trying to get the job status after this timeout in seconds.. 
addon_job_timeout: 30 -# There are several network plug-ins that work, but we default to canal +# Specify network plugin-in (canal, calico, flannel, weave, or none) network: plugin: canal @@ -191,10 +228,12 @@ dns: # Currently only nginx ingress provider is supported. # To disable ingress controller, set `provider: none` - +# `node_selector` controls ingress placement and is optional ingress: provider: nginx - + node_selector: + app: ingress + # All add-on manifests MUST specify a namespace addons: |- --- diff --git a/content/rke/latest/en/installation/_index.md b/content/rke/latest/en/installation/_index.md index 738f294a5ab..aa3745781c5 100644 --- a/content/rke/latest/en/installation/_index.md +++ b/content/rke/latest/en/installation/_index.md @@ -1,5 +1,6 @@ --- -title: Installation +title: RKE Kubernetes Installation +description: RKE is a fast, versatile Kubernetes installer you can use to install Kubernetes on your Linux hosts. Learn the simple steps for an RKE Kubernetes installation weight: 50 --- @@ -92,7 +93,7 @@ Run `rke config` to create a new `cluster.yml` in the current directory. This co rke config --name cluster.yml ``` -#### Creating an Empty `cluster.yml` +#### Other RKE Configuration Options You can create an empty template `cluster.yml` file by specifying the `--empty` flag. @@ -100,8 +101,6 @@ You can create an empty template `cluster.yml` file by specifying the `--empty` rke config --empty --name cluster.yml ``` -#### Printing the `cluster.yml` - Instead of creating a file, you can print the generated configuration to stdout using the `--print` flag. ``` diff --git a/content/rke/latest/en/managing-clusters/_index.md b/content/rke/latest/en/managing-clusters/_index.md index bfed7a7d10e..24dcb07884b 100644 --- a/content/rke/latest/en/managing-clusters/_index.md +++ b/content/rke/latest/en/managing-clusters/_index.md @@ -1,5 +1,6 @@ --- title: Adding and Removing Nodes +description: RKE supports adding/removing nodes for worker and controlplane hosts. Learn about the changes you need to make to the cluster.yml in order to add/remove nodes weight: 175 aliases: - /rke/latest/en/installation/managing-clusters/ diff --git a/content/rke/latest/en/os/_index.md b/content/rke/latest/en/os/_index.md index b92097c4823..d9da146c135 100644 --- a/content/rke/latest/en/os/_index.md +++ b/content/rke/latest/en/os/_index.md @@ -1,15 +1,13 @@ --- title: Requirements weight: 5 -aliases: - - /rke/latest/en/installation/os --- **In this section:** - [Operating System](#operating-system) - + - [General Linux Requirements](#general-linux-requirements) - [Red Hat Enterprise Linux (RHEL) / Oracle Enterprise Linux (OEL) / CentOS](#red-hat-enterprise-linux-rhel-oracle-enterprise-linux-oel-centos) - [Using upstream Docker](#using-upstream-docker) @@ -29,6 +27,8 @@ aliases: ## Operating System +### General Linux Requirements + RKE runs on almost any Linux OS with Docker installed. Most of the development and testing of RKE occurred on Ubuntu 16.04. However, some OS's have restrictions and specific requirements. - [SSH user]({{< baseurl >}}/rke/latest/en/config-options/nodes/#ssh-user) - The SSH user used for node access must be a member of the `docker` group on the node: @@ -36,6 +36,8 @@ RKE runs on almost any Linux OS with Docker installed. Most of the development a ``` usermod -aG docker ``` + +> **Note:** Users added to the `docker` group are granted effective root permissions on the host by means of the Docker API. 
Only choose a user that is intended for this purpose and has its credentials and access properly secured. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to see how you can configure access to Docker without using the `root` user. @@ -45,15 +47,16 @@ RKE runs on almost any Linux OS with Docker installed. Most of the development a * `modprobe module_name` * `lsmod | grep module_name` * `grep module_name /lib/modules/$(uname -r)/modules.builtin`, if it's a built-in module - * bash script - ```bash + * The following bash script + +```bash for module in br_netfilter ip6_udp_tunnel ip_set ip_set_hash_ip ip_set_hash_net iptable_filter iptable_nat iptable_mangle iptable_raw nf_conntrack_netlink nf_conntrack nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat nf_nat_ipv4 nf_nat_masquerade_ipv4 nfnetlink udp_tunnel veth vxlan x_tables xt_addrtype xt_conntrack xt_comment xt_mark xt_multiport xt_nat xt_recent xt_set xt_statistic xt_tcpudp; do if ! lsmod | grep -q $module; then - echo "module $module is not present" - fi - done; - ``` + echo "module $module is not present"; + fi; + done +``` Module name | ------------| @@ -161,13 +164,21 @@ By default, Atomic hosts do not come with a Docker group. You can update the own ## Software -- Docker - Each Kubernetes version supports different Docker versions. +This section describes the requirements for Docker, Kubernetes, and SSH. -Kubernetes Version | Supported Docker version(s) | -----|----| -v1.13.x | RHEL Docker 1.13, 17.03.2, 18.06.2, 18.09.2 | -v1.12.x | RHEL Docker 1.13, 17.03.2, 18.06.2, 18.09.2 | -v1.11.x | RHEL Docker 1.13, 17.03.2, 18.06.2, 18.09.2 | +### OpenSSH + +In order to SSH into each node, OpenSSH 7.0+ must be installed on each node. + +### Kubernetes + +Refer to the [RKE release notes](https://github.com/rancher/rke/releases) for the supported versions of Kubernetes. + +### Docker + +Each Kubernetes version supports different Docker versions. The Kubernetes release notes contain the [current list](https://kubernetes.io/docs/setup/release/notes/#dependencies) of validated Docker versions. + +### Installing Docker You can either follow the [Docker installation](https://docs.docker.com/install/) instructions or use one of Rancher's [install scripts](https://github.com/rancher/install-docker) to install Docker. For RHEL, please see [How to install Docker on Red Hat Enterprise Linux 7](https://access.redhat.com/solutions/3727511). @@ -177,6 +188,8 @@ Docker Version | Install Script | 18.06.2 | curl https://releases.rancher.com/install-docker/18.06.2.sh | sh | 17.03.2 | curl https://releases.rancher.com/install-docker/17.03.2.sh | sh | +### Checking the Installed Docker Version + Confirm that a Kubernetes supported version of Docker is installed on your machine, by running `docker version --format '{{.Server.Version}}'`. ``` @@ -184,10 +197,7 @@ docker version --format '{{.Server.Version}}' 17.03.2-ce ``` -- OpenSSH 7.0+ - In order to SSH into each node, OpenSSH must be installed on each node. 
- ## Ports - {{< ports-rke-nodes >}} {{< requirements_ports_rke >}} diff --git a/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md b/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md index f8cee4cc5e4..81240247b68 100644 --- a/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md +++ b/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md @@ -20,7 +20,7 @@ CONTAINER ID IMAGE COMMAND CREATED See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. -* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [RKE OS Requirements](https://rancher.com/docs/rke/latest/en/os/#red-hat-enterprise-linux-rhel-oracle-enterprise-linux-oel-centos) for more on how to set this up. +* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [RKE OS Requirements](https://rancher.com/docs/rke/latest/en/os/#red-hat-enterprise-linux-rhel-oracle-enterprise-linux-oel-centos) for more on how to set this up. * SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat: ``` @@ -35,7 +35,7 @@ SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10 #### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain -* The key file specified as `ssh_key_path` is not correct for accesing the node. Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. +* The key file specified as `ssh_key_path` is not correct for accessing the node. Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. #### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys diff --git a/content/rke/latest/en/upgrades/_index.md b/content/rke/latest/en/upgrades/_index.md index 8c5ffc2d0ba..5e47ee6ab09 100644 --- a/content/rke/latest/en/upgrades/_index.md +++ b/content/rke/latest/en/upgrades/_index.md @@ -3,45 +3,106 @@ title: Upgrades weight: 100 --- -After RKE has deployed Kubernetes, you can upgrade the versions of the components in your Kubernetes cluster, [definition of the Kubernetes services]({{< baseurl >}}/rke/latest/en/config-options/services/) or [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/). +After RKE has deployed Kubernetes, you can upgrade the versions of the components in your Kubernetes cluster, the [definition of the Kubernetes services]({{< baseurl >}}/rke/latest/en/config-options/services/) or the [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/). -## Version Upgrades +The default Kubernetes version for each RKE version can be found in [the RKE release notes](https://github.com/rancher/rke/releases/). 
-RKE supports version upgrades by changing the image tags of the [system-images]({{< baseurl >}}/rke/latest/en/config-options/system-images/). +You can also select a newer version of Kubernetes to install for your cluster. Downgrading Kubernetes is not supported. -For example, to change the deployed Kubernetes version, you update the `rancher/hyperkube` tag from `v1.9.7` to `v1.10.3` in the `cluster.yml` that was originally used to deploy your Kubernetes cluster. +Each version of RKE has a specific [list of supported Kubernetes versions.](#listing-supported-kubernetes-versions) -Original YAML +In case the Kubernetes version is defined in the `kubernetes_version` directive and under the `system-images` directive are defined, the `system-images` configuration will take precedence over `kubernetes_version`. + +This page covers the following topics: + +- [Prerequisites](#prerequisites) +- [Upgrading Kubernetes](#upgrading-kubernetes) +- [Listing supported Kubernetes versions](#listing-supported-kubernetes-versions) +- [Kubernetes version precedence](#kubernetes-version-precedence) +- [Using an unsupported Kubernetes version](#using-an-unsupported-kubernetes-version) +- [Mapping the Kubernetes version to services](#mapping-the-kubernetes-version-to-services) +- [Service upgrades](#service-upgrades) +- [Add-ons upgrades](#add-ons-upgrades) + +### Prerequisites + +- Ensure that any `system_images` configuration is absent from the `cluster.yml`. The Kubernetes version should only be listed under the `system_images` directive if an [unsupported version](#using-an-unsupported-kubernetes-version) is being used. Refer to [Kubernetes version precedence](#kubernetes-version-precedence) for more information. +- Ensure that the correct files to manage [Kubernetes cluster state]({{< baseurl >}}/rke/latest/en/installation/#kubernetes-cluster-state) are present in the working directory. Refer to the tabs below for the required files, which differ based on the RKE version. + +{{% tabs %}} +{{% tab "RKE v0.2.0+" %}} +The `cluster.rkestate` file contains the current state of the cluster including the RKE configuration and the certificates. + +This file is created in the same directory that has the cluster configuration file `cluster.yml`. + +It is required to keep the `cluster.rkestate` file to perform any operation on the cluster through RKE, or when upgrading a cluster last managed via RKE v0.2.0 or later. +{{% /tab %}} +{{% tab "RKE prior to v0.2.0" %}} +Ensure that the `kube_config_cluster.yml` file is present in the working directory. + +RKE saves the Kubernetes cluster state as a secret. When updating the state, RKE pulls the secret, updates or changes the state, and saves a new secret. The `kube_config_cluster.yml` file is required for upgrading a cluster last managed via RKE v0.1.x. +{{% /tab %}} +{{% /tabs %}} + +### Upgrading Kubernetes + +> **Note:** RKE does not support rolling back to previous versions. + +To upgrade the Kubernetes version of an RKE-provisioned cluster, set the `kubernetes_version` string in the `cluster.yml` to the desired version from the [list of supported Kubernetes versions](#listing-supported-kubernetes-versions) for the specific version of RKE: ```yaml -system_images: - kubernetes: rancher/hyperkube:v1.9.7 +kubernetes_version: "v1.15.5-rancher1-1" ``` -Updated YAML - -```yaml -system_images: - kubernetes: rancher/hyperkube:v1.10.3 -``` - -After updating your `cluster.yml` with the required changes, all you need to do is run `rke up` to upgrade Kubernetes. 
+Then invoke `rke up`: ``` $ rke up --config cluster.yml ``` -First, RKE will use the local `kube_config_cluster.yml` to confirm the versions of the existing components in the Kubernetes cluster before upgrading to the latest image. +### Listing Supported Kubernetes Versions -> **Note:** RKE does not support rollback to previous versions. +Please refer to the [release notes](https://github.com/rancher/rke/releases) of the RKE version that you are running, to find the list of supported Kubernetes versions as well as the default Kubernetes version. -## Service Upgrades +You can also list the supported versions and system images of specific version of RKE release with a quick command. + +``` +$ rke config --list-version --all +v1.15.3-rancher2-1 +v1.13.10-rancher1-2 +v1.14.6-rancher2-1 +v1.16.0-beta.1-rancher1-1 +``` + +### Kubernetes Version Precedence + +In case both `kubernetes_version` and `system_images` are defined, the `system_images` configuration will take precedence over `kubernetes_version`. + +In addition, if neither `kubernetes_version` nor `system_images` are configured in the `cluster.yml`, RKE will apply the default Kubernetes version for the specific version of RKE used to invoke `rke up`. + +### Using an Unsupported Kubernetes Version + +As of v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, then RKE will error out. + +Prior to v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, the default version from the supported list is used. + +If you want to use a different version from the supported list, please use the [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) option. + +### Mapping the Kubernetes Version to Services + +In RKE, `kubernetes_version` is used to map the version of Kubernetes to the default services, parameters, and options. + +For RKE v0.3.0+, the service defaults are located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go). + +For RKE prior to v0.3.0, the service defaults are located [here](https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go). Note: The version in the path of the service defaults file corresponds to a Rancher version. Therefore, for Rancher v2.1.x, [this file](https://github.com/rancher/types/blob/release/v2.1/apis/management.cattle.io/v3/k8s_defaults.go) should be used. + +### Service Upgrades [Services]({{< baseurl >}}/rke/latest/en/config-options/services/) can be upgraded by changing any of the services arguments or `extra_args` and running `rke up` again with the updated configuration file. > **Note:** The following arguments, `service_cluster_ip_range` or `cluster_cidr`, cannot be changed as any changes to these arguments will result in a broken cluster. Currently, network pods are not automatically upgraded. -## Add-Ons Upgrades +### Add-Ons Upgrades As of v0.1.8, upgrades to add-ons are supported. 
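
As a sketch of that workflow, an add-on upgrade follows the same pattern as any other configuration change: edit the `addons` (or `addons_include`) section of `cluster.yml` and run `rke up` again. The manifest below is illustrative only; `my-nginx` and its image tag are placeholder values, and remember that add-on manifests must specify a namespace.

```yaml
# Illustrative only: updating a user add-on by bumping its image tag in cluster.yml
addons: |-
  ---
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: my-nginx
    namespace: default
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: my-nginx
    template:
      metadata:
        labels:
          app: my-nginx
      spec:
        containers:
        - name: my-nginx
          image: nginx:1.17
```

Running `rke up --config cluster.yml` afterwards re-applies the updated manifest through the add-on deployment job.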
diff --git a/layouts/partials/page-edit.html b/layouts/partials/page-edit.html index 5e16a32293f..4ab823fef49 100755 --- a/layouts/partials/page-edit.html +++ b/layouts/partials/page-edit.html @@ -1,3 +1,4 @@ +{{ if not .Lastmod.IsZero }}Last updated on {{ .Lastmod.Format "Jan 2, 2006" }}{{ end }} \ No newline at end of file + diff --git a/layouts/shortcodes/img.html b/layouts/shortcodes/img.html new file mode 100644 index 00000000000..e7a85ae6d4d --- /dev/null +++ b/layouts/shortcodes/img.html @@ -0,0 +1,14 @@ +{{ $img := .Get 0 }} +{{ $alt := .Get 1 }} +{{ with resources.Get $img }} + {{ $thumb10 := .Resize "1000x" }} + {{ $thumb8 := .Resize "800x" }} + {{ $thumb6 := .Resize "600x" }} + {{ $thumb4 := .Resize "400x" }} + {{ $thumb2 := .Resize "200x" }} + {{$alt}} +{{ end }} diff --git a/layouts/shortcodes/persistentdata.html b/layouts/shortcodes/persistentdata.html index 91d841be675..f816e7fff37 100644 --- a/layouts/shortcodes/persistentdata.html +++ b/layouts/shortcodes/persistentdata.html @@ -1,9 +1,14 @@
-

Rancher uses etcd as datastore. When using the Single Node Install, the embedded etcd is being used. The persistent data is at the following path in the container: /var/lib/rancher. You can bind mount a host volume to this location to preserve data on the host it is running on. When using RancherOS, please check what persistent storage directories you can use to store the data.

+

Rancher uses etcd as datastore. When using the Docker Install, the embedded etcd is + being used. The persistent data is at the following path in the container: /var/lib/rancher. You can + bind mount a host volume to this location to preserve data on the host it is running on. When using RancherOS, + please check what persistent storage + directories you can use to store the data.

-
Command:
+

Command:

-
+  
 docker run -d --restart=unless-stopped \
   -p 80:80 -p 443:443 \
   -v /opt/rancher:/var/lib/rancher \
diff --git a/layouts/shortcodes/requirements_ports_rancher.html b/layouts/shortcodes/requirements_ports_rancher.html
deleted file mode 100644
index 62d2297d16e..00000000000
--- a/layouts/shortcodes/requirements_ports_rancher.html
+++ /dev/null
@@ -1,59 +0,0 @@
-
-

Rancher nodes:
Nodes running the rancher/rancher container

-

Rancher nodes - Inbound rules

- - - - - - - - - - - - - - - - - - - -
ProtocolPortSourceDescription
TCP80
  • Load balancer/proxy that does external SSL termination
Rancher UI/API when external SSL termination is used
TCP443
  • etcd nodes
  • controlplane nodes
  • worker nodes
  • Hosted/Imported Kubernetes
  • any that needs to be able to use UI/API
Rancher agent, Rancher UI/API, kubectl
-

Rancher nodes - Outbound rules

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ProtocolPortDestinationDescription
TCP22
  • Any node IP from a node created using Node Driver
SSH provisioning of nodes using Node Driver
TCP443
  • 35.160.43.145/32
  • 35.167.242.46/32
  • 52.33.59.17/32
git.rancher.io (catalogs)
TCP2376
  • Any node IP from a node created using Node Driver
Docker daemon TLS port used by Docker Machine
TCP6443
  • Hosted/Imported Kubernetes API
Kubernetes apiserver
- -
-
diff --git a/layouts/shortcodes/ssl_faq_ha.html b/layouts/shortcodes/ssl_faq_ha.html index b0f4525bfb6..3b46867e35a 100644 --- a/layouts/shortcodes/ssl_faq_ha.html +++ b/layouts/shortcodes/ssl_faq_ha.html @@ -2,9 +2,9 @@

You can recognize the PEM format by the following traits:

    -
  • The file begins with the following header:
    -----BEGIN CERTIFICATE-----
  • -
  • The header is followed by a long string of characters. Like, really long.
  • -
  • The file ends with a footer:
    -----END CERTIFICATE-----
  • +
  • The file begins with the following header:
    -----BEGIN CERTIFICATE-----
  • +
  • The header is followed by a long string of characters. Like, really long.
  • +
  • The file ends with a footer:
    -----END CERTIFICATE-----

PEM Certificate Example:

@@ -22,9 +22,9 @@ VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==

To encode your certificates in base64:

    -
  1. Change directory to where the PEM file resides.
  2. -
  3. Run one of the following commands. Replace FILENAME with the name of your certificate. -
    +  
  4. Change directory to where the PEM file resides.
  5. +
  6. Run one of the following commands. Replace FILENAME with the name of your certificate. +
     # MacOS
     cat FILENAME | base64
     # Linux
    @@ -32,7 +32,7 @@ cat FILENAME | base64 -w0
     # Windows
     certutil -encode FILENAME FILENAME.base64
     
    -
  7. +

How Can I Verify My Generated base64 String For The Certificates?

@@ -40,9 +40,10 @@ certutil -encode FILENAME FILENAME.base64

To decode your certificates in base64:

    -
  1. Copy the generated base64 string.
  2. -
  3. Run one of the following commands. Replace YOUR_BASE64_STRING with the previously copied base64 string. -
    +  
  4. Copy the generated base64 string.
  5. +
  6. Run one of the following commands. Replace YOUR_BASE64_STRING with the previously copied base64 + string. +
     # MacOS
     echo YOUR_BASE64_STRING | base64 -D
     # Linux
    @@ -50,7 +51,7 @@ echo YOUR_BASE64_STRING | base64 -d
     # Windows
     certutil -decode FILENAME.base64 FILENAME.verify
     
    -
  7. +
@@ -69,9 +70,12 @@ certutil -decode FILENAME.base64 FILENAME.verify

How Do I Validate My Certificate Chain?

-

You can validate the certificate chain by using the openssl binary. If the output of the command (see the command example below) ends with Verify return code: 0 (ok), your certificate chain is valid. The ca.pem file must be the same as you added to the rancher/rancher container. When using a certificate signed by a recognized Certificate Authority, you can omit the -CAfile parameter.

+

You can validate the certificate chain by using the openssl binary. If the output of the command (see + the command example below) ends with Verify return code: 0 (ok), your certificate chain is valid. The + ca.pem file must be the same as you added to the rancher/rancher container. When using a + certificate signed by a recognized Certificate Authority, you can omit the -CAfile parameter.

-
Command:
+

Command:

 openssl s_client -CAfile ca.pem -connect rancher.yourdomain.com:443 -servername rancher.yourdomain.com
 ...
diff --git a/layouts/shortcodes/ssl_faq_single.html b/layouts/shortcodes/ssl_faq_single.html
index 71f02c6b4d0..f10af1e0903 100644
--- a/layouts/shortcodes/ssl_faq_single.html
+++ b/layouts/shortcodes/ssl_faq_single.html
@@ -2,9 +2,9 @@
 
 

You can recognize the PEM format by the following traits:

    -
  • The file begins with the following header:
    -----BEGIN CERTIFICATE-----
  • -
  • The header is followed by a long string of characters. Like, really long.
  • -
  • The file ends with a footer:
    -----END CERTIFICATE-----
  • +
  • The file begins with the following header:
    -----BEGIN CERTIFICATE-----
  • +
  • The header is followed by a long string of characters. Like, really long.
  • +
  • The file ends with a footer:
    -----END CERTIFICATE-----

PEM Certificate Example:

@@ -27,9 +27,11 @@ VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== -----END RSA PRIVATE KEY-----
-

If your key looks like the example below, see How Can I Convert My Certificate Key From PKCS8 to PKCS1?

+

If your key looks like the example below, see How Can I Convert My Certificate Key From + PKCS8 to PKCS1? +

-

+    
 -----BEGIN PRIVATE KEY-----
 MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV
 ... more lines
@@ -37,27 +39,27 @@ VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
 -----END PRIVATE KEY-----
 
-

How Can I Convert My Certificate Key From PKCS8 to PKCS1?

+

How Can I Convert My Certificate Key From PKCS8 to PKCS1?

-

If you are using a PKCS8 certificate key file, Rancher will log the following line:

+

If you are using a PKCS8 certificate key file, Rancher will log the following line:

-
+    
 ListenConfigController cli-config [listener] failed with : failed to read private key: asn1: structure error: tags don't match (2 vs {class:0 tag:16 length:13 isCompound:true})
 
-

To make this work, you will need to convert the key from PKCS8 to PKCS1 using the command below:

+

To make this work, you will need to convert the key from PKCS8 to PKCS1 using the command below:

-
+    
 openssl rsa -in key.pem -out convertedkey.pem
 
-

You can now use convertedkey.pem as certificate key file for Rancher.

+

You can now use convertedkey.pem as certificate key file for Rancher.

-

What is the Order of Certificates if I Want to Add My Intermediate(s)?

+

What is the Order of Certificates if I Want to Add My Intermediate(s)?

-

The order of adding certificates is as follows:

+

The order of adding certificates is as follows:

-
+    
 -----BEGIN CERTIFICATE-----
 %YOUR_CERTIFICATE%
 -----END CERTIFICATE-----
@@ -66,12 +68,16 @@ openssl rsa -in key.pem -out convertedkey.pem
 -----END CERTIFICATE-----
 
-

How Do I Validate My Certificate Chain?

+

How Do I Validate My Certificate Chain?

-

You can validate the certificate chain by using the openssl binary. If the output of the command (see the command example below) ends with Verify return code: 0 (ok), your certificate chain is valid. The ca.pem file must be the same as you added to the rancher/rancher container. When using a certificate signed by a recognized Certificate Authority, you can omit the -CAfile parameter.

+

You can validate the certificate chain by using the openssl binary. If the output of the command + (see the command example below) ends with Verify return code: 0 (ok), your certificate chain is + valid. The ca.pem file must be the same as you added to the rancher/rancher container. + When using a certificate signed by a recognized Certificate Authority, you can omit the -CAfile + parameter.

-
Command:
-
+    

Command:

+
 openssl s_client -CAfile ca.pem -connect rancher.yourdomain.com:443
 ...
     Verify return code: 0 (ok)
diff --git a/layouts/shortcodes/step_create-cloud-credential.html b/layouts/shortcodes/step_create-cloud-credential.html
deleted file mode 100644
index 1cc2891c0b5..00000000000
--- a/layouts/shortcodes/step_create-cloud-credential.html
+++ /dev/null
@@ -1,6 +0,0 @@
-

-As of v2.2.0, account access information will be stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. -
-Since multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. To create a new cloud credential, enter Name and Account Access data, then click Create. -
-
diff --git a/layouts/shortcodes/step_create-cluster_node-pools.html b/layouts/shortcodes/step_create-cluster_node-pools.html index 158eaf7f1f3..ada69c017f6 100644 --- a/layouts/shortcodes/step_create-cluster_node-pools.html +++ b/layouts/shortcodes/step_create-cluster_node-pools.html @@ -1,4 +1,4 @@ -

Add one or more node pools to your cluster.

A node pool is a collection of nodes based on a node template. A node Template defines the configuration of a node, like what Operating System to use, number of CPUs and amount of memory. Each node pool must have one or more nodes roles assigned.

+

Add one or more node pools to your cluster.

A node pool is a collection of nodes based on a node template. A node template defines the configuration of a node, such as the operating system, the number of CPUs, and the amount of memory. Each node pool must have one or more node roles assigned.

Notes:

diff --git a/layouts/shortcodes/step_rancher-template.html b/layouts/shortcodes/step_rancher-template.html index 0323edb2d58..96a1584194c 100644 --- a/layouts/shortcodes/step_rancher-template.html +++ b/layouts/shortcodes/step_rancher-template.html @@ -1,19 +1,24 @@ -

Complete the Rancher Template form to configure the Docker daemon on the instances that will be created.

+

The Docker daemon configuration options include:

  • -

    Labels can be configured on the Docker daemon.

    - -

    Docker object label documentation

    +

    + Labels: For information on labels, refer to the Docker + object label documentation. +

  • - -

    Engine Options customize the configuration of the Docker daemon. Important configuration options might include: -

      -
    • Docker Engine Install URL: Determines what Docker version will be installed on the instance.

      When using RancherOS, please check what Docker versions are available using sudo ros engine list on the RancherOS version you want to use, as the default Docker version configured might not be available. If you experience issues installing Docker on other operating systems, please try to install Docker manually using the configured Docker Engine Install URL to troubleshoot.
    • -
    • Registry mirrors: Docker Registry mirror to be used by the Docker daemon
    • -
    +

    + Docker Engine Install URL: Determines what Docker version will be installed on the instance. Note: If you are using RancherOS, please check what Docker versions are available using sudo ros engine list on the RancherOS version you want to use, as the default Docker version configured might not be available. If you experience issues installing Docker on other operating systems, please try to install Docker manually using the configured Docker Engine Install URL to troubleshoot.

    - -

    Docker daemon option reference

    +
  • +
  • +

    + Registry mirrors: Docker Registry mirror to be used by the Docker daemon +

    +
  • +
  • +

    Other advanced options: Refer to the Docker daemon option reference + +

diff --git a/nginx.conf b/nginx.conf index d9bf2807047..cbe1b9139c8 100644 --- a/nginx.conf +++ b/nginx.conf @@ -2,6 +2,121 @@ map_hash_bucket_size 256; map $request_uri $redirect_uri { ~^/docs/rancher/v2.0(/?.*)$ /docs/rancher/v2.x$1; ~^/docs/rke/v0.1.x(/?.*)$ /docs/rke/latest$1; + ~^/docs/os/quick-start-guide/?$ /rancher-os; + ~^/docs/rancher/v1.0/zh/rancher-compose/?$ /docs/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/; + ~^/docs/rancher/v1.0/zh/rancher-compose/?$ /docs/rancher/v1.6/en/cattle/rancher-compose/; + ~^/docs/rancher/rancher-ui/applications/stacks/adding-balancers/?$ /docs/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/; + ~^/docs/os/running-rancheros/server/install-to-disk/?$ /os/v1.x/en/quick-start-guide/; + ~^/docs/os/running-rancheros/workstation/boot-from-iso/?$ /docs/os/v1.x/en/overview/; + ~^/docs/rancher/installing-rancher/installing-server/multi-nodes/?$ /docs/rancher/v2.x/en/; + ~^/docs/os/running-rancheros/server/install-to-disk/?$ /docs/os/v1.x/en/quick-start-guide/; + ~^/docs/os/running-rancheros/cloud/gce/?$ /docs/os/v1.x/en/installation/running-rancheros/cloud/gce/; + ~^/docs/os/amazon-ecs/?$ /docs/os/v1.x/en/installation/running-rancheros/cloud/aws/; + ~^/docs/rancher/concepts/?$ /docs/rancher/v2.x/en/overview/architecture/; + ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; + ~^/docs/rancher/concepts/?$ /docs/rancher/v2.x/en/overview/architecture/; + ~^/docs/rancher/rancher-compose/?$ /docs/rancher/v2.x/en/; + ~^/docs/os/networking/interfaces/?$ /docs/os/v1.x/en/installation/networking/interfaces/; + ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; + ~^/docs/rancher/concepts/?$ /docs/rancher/v2.x/en/; + ~^/docs/rancher/rancher-services/storage-service/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/; + ~^/docs/rancher/installing-rancher/installing-server/multi-nodes/?$ /docs/rancher/v2.x/en/installation/ha/; + ~^/docs/rancher/upgrading/?$ /docs/rancher/v2.x/en/upgrades/; + ~^/docs/rancher/configuration/access-control/?$ /docs/rancher/v2.x/en/admin-settings/rbac/; + ~^/docs/os/running-rancheros/server/install-to-disk/?$ /docs/os/v1.x/en/installation/running-rancheros/; + ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; + ~^/docs/os/configuration/custom-rancheros-iso/?$ /docs/os/v1.x/en/installation/configuration/; + ~^/docs/rancher/rancher-compose/?$ /docs/rancher/v2.x/en/; + ~^/docs/os/running-rancheros/server/raspberry-pi/?$ /docs/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/; + ~^/docs/rancher/v1.0/zh/installing-rancher/installing-server/?$ /docs/rancher/v1.6/en/installing-rancher/installing-server/; + ~^/docs/rancher/rancher-services/metadata-service/?$ /docs/rancher/v1.6/en/rancher-services/metadata-service/; + ~^/docs/rancher/api/?$ /docs/rancher/v2.x/en/api/; + ~^/docs/os/running-rancheros/server/raspberry-pi/?$ /docs/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/; + ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; + ~^/docs/os/configuration/switching-consoles/?$ /docs/os/v1.x/en/about/recovery-console/; + ~^/docs/os/running-rancheros/server/install-to-disk//?$ /docs/os/v1.x/en/installation/running-rancheros/server/install-to-disk/; + ~^/docs/os/running-rancheros/workstation/boot-from-iso/?$ /docs/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/; + ~^/docs/rancher/v1.2/en/installing-rancher/installing-server/multi-nodes//?$ /docs/rancher/v2.x/en/installation/ha/; + 
~^/docs/os/running-rancheros/workstation/boot-from-iso/?$ /docs/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/; + ~^/docs/os/system-services/adding-system-services/?$ /docs/os/v1.x/en/installation/system-services/adding-system-services/; + ~^/docs/rancher/installing-rancher/installing-server/?$ /docs/rancher/v2.x/en/installation/; + ~^/docs/rancher/latest/en/installing-rancher/installing-server/multi-nodes/?$ /docs/rancher/v2.x/en/installation/ha/; + ~^/docs/rancher/rancher-services/metadata-service/?$ /docs/rancher/v1.6/en/rancher-services/metadata-service/; + ~^/docs/rancher/rancher-services/health-checks/?$ /docs/rancher/v1.0/en/rancher-services/health-checks/; + ~^/docs/os/cloud-config/?$ /docs/os/v1.x/en/installation/configuration/; + ~^/docs/rancher/api/?$ /docs/rancher/v2.x/en/api/; + + ~^/docs/rancher/v1.0/en/environments/certificates/?$ /docs/rancher/v1.0/en/rancher-ui/infrastructure/certificates/; + ~^/docs/rancher/v1.1/en/api/api-keys/?$ /docs/rancher/v1.1/en/api/v1/api-keys/; + ~^/docs/rancher/v1.1/zh/?$ /docs/rancher/v1.1/en/; + ~^/docs/rancher/v1.2/en/api/api-keys/?$ /docs/rancher/v1.2/en/api/v2-beta/api-keys/; + ~^/docs/rancher/v1.2/zh/?$ /docs/rancher/v1.2/en/; + ~^/docs/rancher/v1.3/en/api/api-keys/?$ /docs/rancher/v1.3/en/api/v2-beta/api-keys/; + ~^/docs/rancher/v1.4/en/api/api-keys/?$ /docs/rancher/v1.4/en/api/v2-beta/api-keys/; + ~^/docs/rancher/v1.4/zh/?$ /docs/rancher/v1.4/en/; + ~^/docs/rancher/v1.5/en/api/api-keys/?$ /docs/rancher/v1.5/en/api/v2-beta/api-keys/; + ~^/docs/rancher/v1.5/zh/?$ /docs/rancher/v1.5/en/; + ~^/docs/rancher/v1.6/en/api/api-keys/?$ /docs/rancher/latest/en/api/v2-beta/api-keys/; + ~^/docs/rancher/v2.x/en/admin-settings/agent-options/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/; + ~^/docs/rancher/v2.x/en/admin-settings/api-audit-log/?$ /docs/rancher/v2.x/en/installation/options/api-audit-log/; + ~^/rancher/v2.x/en/admin-settings/custom-ca-root-certificate/?$ /docs/rancher/v2.x/en/installation/options/custom-ca-root-certificate/; + ~^/docs/rancher/v2.x/en/admin-settings/feature-flags/?$ /docs/rancher/v2.x/en/installation/options/feature-flags/; + ~^/docs/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/?$ /docs/rancher/v2.x/en/removing-rancher/; + ~^/docs/rancher/v2.x/en/admin-settings/removing-rancher/user-cluster-nodes/?$ /docs/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/; + ~^/docs/rancher/v2.x/en/admin-settings/server-url/?$ /docs/rancher/v2.x/en/admin-settings/; + ~^/docs/rancher/v2.x/en/admin-settings/tls-settings/?$ /docs/rancher/v2.x/en/installation/options/tls-settings/; + ~^/docs/rancher/v2.x/en/cluster-admin/kubectl/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; + ~^/docs/rancher/v2.x/en/cluster-provisioning/cluster-members/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/; + ~^/docs/rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/; + ~^/docs/rancher/v2.x/en/cluster-provisioning/rancher-agents/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/; + ~^/docs/rancher/v2.x/en/concepts/cli-configuration/?$ /docs/rancher/v2.x/en/cli/; + ~^/docs/rancher/v2.x/en/concepts/volumes-and-storage/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/; + ~^/docs/rancher/v2.x/en/faq/cleaning-cluster-nodes/?$ /docs/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/; + 
~^/docs/rancher/v2.x/en/installation/air-gap-high-availability/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/; + ~^/docs/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/; + ~^/docs/rancher/v2.x/en/installation/air-gap-installation/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/; + ~^/docs/rancher/v2.x/en/installation/air-gap-installation/install-rancher/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/; + ~^/docs/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/; + ~^/docs/rancher/v2.x/en/installation/air-gap-single-node/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/; + ~^/docs/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/; + ~^/docs/rancher/v2.x/en/installation/api-auditing/?$ /docs/rancher/v2.x/en/installation/options/api-audit-log/; + ~^/docs/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration/?$ /docs/rancher/v2.x/en/backups/backups/ha-backups/; + ~^/docs/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/?$ /docs/rancher/v2.x/en/backups/backups/single-node-backups/; + ~^/docs/rancher/v2.x/en/installation/ha-server-install-external-lb/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/; + ~^/docs/rancher/v2.x/en/installation/ha-server-install/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/; + ~^/docs/rancher/v2.x/en/installation/ha-server-install/nlb/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/nlb/; + ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/?$ /docs/rancher/v2.x/en/installation/options/chart-options/; + ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/?$ /docs/rancher/v2.x/en/installation/options/tls-secrets/; + ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/?$ /docs/rancher/v2.x/en/installation/options/troubleshooting/; + ~^/docs/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/api-auditing/; + ~^/docs/rancher/v2.x/en/installation/references/?$ /docs/rancher/v2.x/en/installation/requirements/; + ~^/docs/rancher/v2.x/en/installation/single-node-install-external-lb/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node/single-node-install-external-lb/; + ~^/docs/rancher/v2.x/en/installation/single-node-install/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node/; + ~^/docs/rancher/v2.x/en/installation/single-node/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/editing-clusters/?$ /docs/rancher/v2.x/en/cluster-admin/editing-clusters/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/kubeconfig/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubeconfig/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/kubectl/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/?$ /docs/rancher/v2.x/en/cluster-admin/projects-and-namespaces/; + 
~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/?$ /docs/rancher/v2.x/en/project-admin/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/?$ /docs/rancher/v2.x/en/project-admin/project-members/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/?$ /docs/rancher/v2.x/en/project-admin/resource-quotas/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/nfs/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/vsphere/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/; + ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/; + ~^/docs/rancher/v2.x/en/project-admin/tools/pipelines/?$ /docs/rancher/v2.x/en/project-admin/pipelines/; + ~^/docs/rancher/v2.x/en/tasks/clusters/creating-a-cluster/?$ /docs/rancher/v2.x/en/cluster-provisioning/; + ~^/docs/rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-amazon-ec2/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/; + ~^/docs/rancher/v2.x/en/tasks/clusters/using-kubectl-to-access-a-cluster/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; + ~^/docs/rancher/v2.x/en/tools/?$ /docs/rancher/v2.x/en/cluster-admin/tools/; + ~^/docs/rancher/v2.x/en/tools/logging/?$ /docs/rancher/v2.x/en/cluster-admin/tools/logging/; + ~^/docs/rancher/v2.x/en/tools/pipelines/?$ /docs/rancher/v2.x/en/project-admin/pipelines/; + ~^/docs/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x/?$ /docs/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/; + ~^/docs/rancher/v2.x/en/upgrades/ha-server-rollbacks/?$ /docs/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/; + ~^/docs/rancher/v2.x/en/upgrades/single-node-rollbacks/?$ /docs/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/; + ~^/docs/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/?$ /docs/rancher/v2.x/en/upgrades/upgrades/ha/; + ~^/docs/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/?$ /docs/rancher/v2.x/en/upgrades/upgrades/single-node/; + ~^/docs/rke/latest/en/installation/os/?$ /docs/rke/latest/en/os/; } server { diff --git a/package.json b/package.json index fa556592531..8ee8cc9821e 100644 --- a/package.json +++ b/package.json @@ -14,7 +14,7 @@ "atomic-algolia": "^0.3.15", "instantsearch.js": "^2.8.0", "izimodal": "^1.5.1", - "jquery": "^3.3.1", + "jquery": "^3.4.0", "jsdom": "^11.11.0", "lory.js": "^2.4.1", "md5": "^2.2.1", diff --git a/static/img/k3s/k3s-production-setup.svg b/static/img/k3s/k3s-production-setup.svg new file mode 100644 index 00000000000..2d132eb9566 --- /dev/null +++ b/static/img/k3s/k3s-production-setup.svg @@ -0,0 +1,1176 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/img/os/RancherOS_aliyun1.jpg b/static/img/os/RancherOS_aliyun1.jpg new file mode 100644 index 00000000000..7cc8c5c0d90 Binary files /dev/null and b/static/img/os/RancherOS_aliyun1.jpg differ diff --git a/static/img/os/RancherOS_aliyun2.jpg b/static/img/os/RancherOS_aliyun2.jpg new file mode 100644 index 00000000000..cd84c1a7b8c Binary files /dev/null and b/static/img/os/RancherOS_aliyun2.jpg differ diff --git a/static/img/rancher/Google-Credentials-tab.png b/static/img/rancher/Google-Credentials-tab.png new file mode 100644 index 00000000000..06174357b54 Binary files /dev/null and b/static/img/rancher/Google-Credentials-tab.png differ diff --git a/static/img/rancher/Google-Enable-APIs-Screen.png b/static/img/rancher/Google-Enable-APIs-Screen.png new file mode 100644 index 00000000000..601eebceae5 Binary files /dev/null and b/static/img/rancher/Google-Enable-APIs-Screen.png differ diff --git a/static/img/rancher/Google-Enable-Admin-APIs.png b/static/img/rancher/Google-Enable-Admin-APIs.png new file mode 100644 index 00000000000..4f0afbf7db8 Binary files /dev/null and b/static/img/rancher/Google-Enable-Admin-APIs.png differ diff --git a/static/img/rancher/Google-OAuth-consent-screen-tab.png b/static/img/rancher/Google-OAuth-consent-screen-tab.png new file mode 100644 index 00000000000..e15132c50ee Binary files /dev/null and b/static/img/rancher/Google-OAuth-consent-screen-tab.png differ diff --git a/static/img/rancher/Google-Select-UniqueID-column.png b/static/img/rancher/Google-Select-UniqueID-column.png new file mode 100644 index 00000000000..c4aa7011f54 Binary files /dev/null and b/static/img/rancher/Google-Select-UniqueID-column.png differ diff --git a/static/img/rancher/Google-svc-acc-step1.png b/static/img/rancher/Google-svc-acc-step1.png new file mode 100644 index 00000000000..f8c225e9691 Binary files /dev/null and b/static/img/rancher/Google-svc-acc-step1.png differ diff --git a/static/img/rancher/Google-svc-acc-step2.png b/static/img/rancher/Google-svc-acc-step2.png new file mode 100644 index 00000000000..683a4264984 Binary files /dev/null and b/static/img/rancher/Google-svc-acc-step2.png differ diff --git a/static/img/rancher/Google-svc-acc-step3-key-creation.png b/static/img/rancher/Google-svc-acc-step3-key-creation.png new file mode 100644 index 00000000000..19f72ede5a9 Binary files /dev/null and b/static/img/rancher/Google-svc-acc-step3-key-creation.png differ diff --git a/static/img/rancher/add-custom-metrics.gif b/static/img/rancher/add-custom-metrics.gif new file mode 100644 index 00000000000..9c6405a3433 Binary files /dev/null and b/static/img/rancher/add-custom-metrics.gif differ diff --git a/static/img/rancher/add-ingress-form.png b/static/img/rancher/add-ingress-form.png new file mode 100644 index 00000000000..405ff3abf1e Binary files /dev/null and b/static/img/rancher/add-ingress-form.png differ diff --git a/static/img/rancher/add-ingress.gif b/static/img/rancher/add-ingress.gif new file mode 100644 index 00000000000..b9a3f449d5b Binary files /dev/null and b/static/img/rancher/add-ingress.gif differ diff --git a/static/img/rancher/add-node-label.gif b/static/img/rancher/add-node-label.gif new file mode 100644 index 00000000000..9c41e774064 Binary files /dev/null and b/static/img/rancher/add-node-label.gif differ diff --git a/static/img/rancher/add-pod-label.gif b/static/img/rancher/add-pod-label.gif new file mode 100644 index 00000000000..b78da3ce7cb Binary files /dev/null 
and b/static/img/rancher/add-pod-label.gif differ diff --git a/static/img/rancher/add-record.png b/static/img/rancher/add-record.png new file mode 100644 index 00000000000..8838a5ea6ff Binary files /dev/null and b/static/img/rancher/add-record.png differ diff --git a/static/img/rancher/auth-providers.svg b/static/img/rancher/auth-providers.svg new file mode 100644 index 00000000000..8b53323d25a --- /dev/null +++ b/static/img/rancher/auth-providers.svg @@ -0,0 +1,2 @@ + +
[SVG markup omitted: diagram of the Rancher Authentication Proxy sitting between users and external authentication providers.]
\ No newline at end of file diff --git a/static/img/rancher/cattle-load-balancer.svg b/static/img/rancher/cattle-load-balancer.svg new file mode 100644 index 00000000000..70db25baa0b --- /dev/null +++ b/static/img/rancher/cattle-load-balancer.svg @@ -0,0 +1,2 @@ + +
[SVG markup omitted: Rancher v1.6 Cattle environment load-balancing diagram. web.com/login and chat.com/login resolve to Load Balancer 1 (Host 1 and Host 2 IPs, port 80) and Load Balancer 2 (Host 3 and Host 4 IPs, port 80); haproxy containers on Hosts 1-4 front the web 1-3 and chat 1-3 containers.]
\ No newline at end of file diff --git a/static/img/rancher/deploy-service.gif b/static/img/rancher/deploy-service.gif new file mode 100644 index 00000000000..bf97d1690e7 Binary files /dev/null and b/static/img/rancher/deploy-service.gif differ diff --git a/static/img/rancher/deploy-workload-hostport.png b/static/img/rancher/deploy-workload-hostport.png new file mode 100644 index 00000000000..ec6193df3c4 Binary files /dev/null and b/static/img/rancher/deploy-workload-hostport.png differ diff --git a/static/img/rancher/deploy-workload-load-balancer.png b/static/img/rancher/deploy-workload-load-balancer.png new file mode 100644 index 00000000000..4751b599a28 Binary files /dev/null and b/static/img/rancher/deploy-workload-load-balancer.png differ diff --git a/static/img/rancher/deploy-workload-nodeport.png b/static/img/rancher/deploy-workload-nodeport.png new file mode 100644 index 00000000000..d1cfa67e35b Binary files /dev/null and b/static/img/rancher/deploy-workload-nodeport.png differ diff --git a/static/img/rancher/edit-migration-workload.gif b/static/img/rancher/edit-migration-workload.gif new file mode 100644 index 00000000000..f9510b8ff9f Binary files /dev/null and b/static/img/rancher/edit-migration-workload.gif differ diff --git a/static/img/rancher/enable-cluster-monitoring.gif b/static/img/rancher/enable-cluster-monitoring.gif new file mode 100644 index 00000000000..baef3cc2487 Binary files /dev/null and b/static/img/rancher/enable-cluster-monitoring.gif differ diff --git a/static/img/rancher/enable-project-monitoring.gif b/static/img/rancher/enable-project-monitoring.gif new file mode 100644 index 00000000000..f44c67eb8f7 Binary files /dev/null and b/static/img/rancher/enable-project-monitoring.gif differ diff --git a/static/img/rancher/global-menu.png b/static/img/rancher/global-menu.png index 25ba756e0b4..68465a42bee 100644 Binary files a/static/img/rancher/global-menu.png and b/static/img/rancher/global-menu.png differ diff --git a/static/img/rancher/health-check-section.png b/static/img/rancher/health-check-section.png new file mode 100644 index 00000000000..4a4bfafe128 Binary files /dev/null and b/static/img/rancher/health-check-section.png differ diff --git a/static/img/rancher/healthcheck-cmd-exec.png b/static/img/rancher/healthcheck-cmd-exec.png new file mode 100644 index 00000000000..06b6b22ab6c Binary files /dev/null and b/static/img/rancher/healthcheck-cmd-exec.png differ diff --git a/static/img/rancher/healthcheck.svg b/static/img/rancher/healthcheck.svg new file mode 100644 index 00000000000..55b573e578f --- /dev/null +++ b/static/img/rancher/healthcheck.svg @@ -0,0 +1,2 @@ + +
[SVG markup omitted: Rancher v1.6 stack healthcheck diagram. 1. The healthcheck microservice checks for an open port (TCP) or makes a GET request (HTTP) across hosts to the monitored container. 2. The monitored container responds to the check with a response (success) or no response (failure).]
\ No newline at end of file diff --git a/static/img/rancher/import-yaml-error.png b/static/img/rancher/import-yaml-error.png new file mode 100644 index 00000000000..8af7a0878ce Binary files /dev/null and b/static/img/rancher/import-yaml-error.png differ diff --git a/static/img/rancher/imported-workloads.png b/static/img/rancher/imported-workloads.png new file mode 100644 index 00000000000..75142fd0510 Binary files /dev/null and b/static/img/rancher/imported-workloads.png differ diff --git a/static/img/rancher/istio-ingress.svg b/static/img/rancher/istio-ingress.svg new file mode 100644 index 00000000000..abb12973085 --- /dev/null +++ b/static/img/rancher/istio-ingress.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: "Two Ingresses for Istio-enabled Clusters" diagram. The default Nginx ingress controller allows Kubernetes services to access external traffic and is the default for every RKE-provisioned cluster; if only this ingress controller is enabled, Istio can only control service-to-service traffic. The Istio ingress gateway (NodePort or load balancer) allows Istio to manage ingress traffic directly and needs to be enabled in the Istio settings.]
\ No newline at end of file diff --git a/static/img/rancher/k3s-architecture-ha-server.png b/static/img/rancher/k3s-architecture-ha-server.png new file mode 100644 index 00000000000..a0a4405980c Binary files /dev/null and b/static/img/rancher/k3s-architecture-ha-server.png differ diff --git a/static/img/rancher/k3s-architecture-single-server.png b/static/img/rancher/k3s-architecture-single-server.png new file mode 100644 index 00000000000..9a3f1f69381 Binary files /dev/null and b/static/img/rancher/k3s-architecture-single-server.png differ diff --git a/static/img/rancher/k3s-ha-architecture.svg b/static/img/rancher/k3s-ha-architecture.svg new file mode 100644 index 00000000000..70493a11a0c --- /dev/null +++ b/static/img/rancher/k3s-ha-architecture.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: K3s high-availability architecture diagram. A K3s user runs kubectl get pods against a load balancer in front of three K3s server nodes backed by an external database; external traffic reaches a second load balancer in front of the K3s agent nodes (also called worker nodes), shown as an example configuration for nodes running your apps and services.]
\ No newline at end of file diff --git a/static/img/rancher/k3s-single-node-server-architecture.svg b/static/img/rancher/k3s-single-node-server-architecture.svg new file mode 100644 index 00000000000..00bf13979ec --- /dev/null +++ b/static/img/rancher/k3s-single-node-server-architecture.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: K3s single-server architecture diagram. A K3s user runs kubectl get pods against a single K3s server node with an embedded SQLite database; external traffic reaches a load balancer in front of the K3s agent nodes (also called worker nodes), shown as an example configuration for nodes running your apps and services.]
\ No newline at end of file diff --git a/static/img/rancher/kubernetes-load-balancer.svg b/static/img/rancher/kubernetes-load-balancer.svg new file mode 100644 index 00000000000..bf9de1a3986 --- /dev/null +++ b/static/img/rancher/kubernetes-load-balancer.svg @@ -0,0 +1,2 @@ + +
[SVG markup omitted: Kubernetes cluster load-balancing diagram. web.com/login and chat.com/login resolve through an Nginx global load balancer to Nodes 1-4 on port 80; an ingress controller on each node routes traffic to the web 1-3 and chat 1-3 pods.]
\ No newline at end of file diff --git a/static/img/rancher/layer-4-lb-config-map.png b/static/img/rancher/layer-4-lb-config-map.png new file mode 100644 index 00000000000..cf5c9dc168d Binary files /dev/null and b/static/img/rancher/layer-4-lb-config-map.png differ diff --git a/static/img/rancher/linked-service-workload.png b/static/img/rancher/linked-service-workload.png new file mode 100644 index 00000000000..e0a1da0981f Binary files /dev/null and b/static/img/rancher/linked-service-workload.png differ diff --git a/static/img/rancher/liveness-check.png b/static/img/rancher/liveness-check.png new file mode 100644 index 00000000000..e88cb297aae Binary files /dev/null and b/static/img/rancher/liveness-check.png differ diff --git a/static/img/rancher/load-balancer-links.png b/static/img/rancher/load-balancer-links.png new file mode 100644 index 00000000000..5121abd0795 Binary files /dev/null and b/static/img/rancher/load-balancer-links.png differ diff --git a/static/img/rancher/load-balancer-ssl-certs.png b/static/img/rancher/load-balancer-ssl-certs.png new file mode 100644 index 00000000000..246ffd618f8 Binary files /dev/null and b/static/img/rancher/load-balancer-ssl-certs.png differ diff --git a/static/img/rancher/migrate-schedule-workloads.png b/static/img/rancher/migrate-schedule-workloads.png new file mode 100644 index 00000000000..c6ab638ac94 Binary files /dev/null and b/static/img/rancher/migrate-schedule-workloads.png differ diff --git a/static/img/rancher/node-schedule-advanced-options.png b/static/img/rancher/node-schedule-advanced-options.png new file mode 100644 index 00000000000..1d83edc767e Binary files /dev/null and b/static/img/rancher/node-schedule-advanced-options.png differ diff --git a/static/img/rancher/node-schedule-antiaffinity.png b/static/img/rancher/node-schedule-antiaffinity.png new file mode 100644 index 00000000000..74bd0455b50 Binary files /dev/null and b/static/img/rancher/node-schedule-antiaffinity.png differ diff --git a/static/img/rancher/node-scheduling-affinity.png b/static/img/rancher/node-scheduling-affinity.png new file mode 100644 index 00000000000..28d44908232 Binary files /dev/null and b/static/img/rancher/node-scheduling-affinity.png differ diff --git a/static/img/rancher/node-scheduling-labels.png b/static/img/rancher/node-scheduling-labels.png new file mode 100644 index 00000000000..4e1a634e74b Binary files /dev/null and b/static/img/rancher/node-scheduling-labels.png differ diff --git a/static/img/rancher/node-scheduling.png b/static/img/rancher/node-scheduling.png new file mode 100644 index 00000000000..953208144c7 Binary files /dev/null and b/static/img/rancher/node-scheduling.png differ diff --git a/static/img/rancher/one-six-schedule.png b/static/img/rancher/one-six-schedule.png new file mode 100644 index 00000000000..5bc05d915f8 Binary files /dev/null and b/static/img/rancher/one-six-schedule.png differ diff --git a/static/img/rancher/output-dot-text.png b/static/img/rancher/output-dot-text.png new file mode 100644 index 00000000000..ca39b2867b3 Binary files /dev/null and b/static/img/rancher/output-dot-text.png differ diff --git a/static/img/rancher/probes.svg b/static/img/rancher/probes.svg new file mode 100644 index 00000000000..007abfda6c1 --- /dev/null +++ b/static/img/rancher/probes.svg @@ -0,0 +1,2 @@ + +
[SVG markup omitted: Rancher v2.0 Kubernetes cluster probes diagram. 1. On one node, the kubelet runs a liveness probe on a pod that's running; the pod either sends back a response (success) or doesn't (failure). 2. On another node, the kubelet runs a readiness probe on a pod that's in the process of restarting; the probe finds that the pod is busy, so Kubernetes does not send it any requests.]
\ No newline at end of file diff --git a/static/img/rancher/rancher-architecture-cluster-controller.svg b/static/img/rancher/rancher-architecture-cluster-controller.svg new file mode 100644 index 00000000000..ce9fb2958f6 --- /dev/null +++ b/static/img/rancher/rancher-architecture-cluster-controller.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: Rancher cluster controller diagram. Users (Alice and Bob) run kubectl get pods either through the Rancher server's authentication proxy or directly against kube-api-auth in a downstream user cluster. The Rancher server runs one cluster controller per user cluster (Cluster Controllers 1-3), each connected over a tunnel to the cluster agent in its RKE-provisioned user cluster; each user cluster has etcd, controlplane, and worker nodes, every node running a kubelet and node agent behind the Kubernetes API server.]
\ No newline at end of file diff --git a/static/img/rancher/rancher-architecture-node-roles.svg b/static/img/rancher/rancher-architecture-node-roles.svg new file mode 100644 index 00000000000..b96c56d1d2c --- /dev/null +++ b/static/img/rancher/rancher-architecture-node-roles.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: "Roles for Nodes in a High-Availability Rancher Server Cluster" shows three nodes that each combine the etcd, controlplane, and worker roles; "Roles for Nodes in a Downstream User Cluster" shows separate nodes with the etcd role, the controlplane role, and the worker role, each group forming part of the Kubernetes master. Every node runs a kubelet. Note: A kubelet is an agent that runs on each node in the cluster. It makes sure that containers are running in a pod.]
\ No newline at end of file diff --git a/static/img/rancher/rancher-architecture-rancher-api-server.svg b/static/img/rancher/rancher-architecture-rancher-api-server.svg new file mode 100644 index 00000000000..0433bbcd803 --- /dev/null +++ b/static/img/rancher/rancher-architecture-rancher-api-server.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: Rancher API server diagram. A Rancher user reaches the Rancher server through the Rancher UI, CLI, or API, or through kubectl and the Kubernetes API; the authentication proxy and Rancher API server (backed by an etcd data store and Cluster Controllers 1 and 2) connect to cluster agents in two downstream user clusters, one with RKE-provisioned nodes and a Kubernetes API server, the other with Amazon EKS nodes and an EKS control plane, each node running a kubelet.]
\ No newline at end of file diff --git a/static/img/rancher/rancher-architecture-rancher-components.svg b/static/img/rancher/rancher-architecture-rancher-components.svg new file mode 100644 index 00000000000..5762038ac3a --- /dev/null +++ b/static/img/rancher/rancher-architecture-rancher-components.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: Rancher server components diagram. API framework and types (Norman, Types), user interface (Rancher UI, Rancher API UI), utilities (Rancher CLI), and catalog applications.]
\ No newline at end of file diff --git a/static/img/rancher/rancher-architecture-separation-of-rancher-server.svg b/static/img/rancher/rancher-architecture-separation-of-rancher-server.svg new file mode 100644 index 00000000000..a4f9fe3a36d --- /dev/null +++ b/static/img/rancher/rancher-architecture-separation-of-rancher-server.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: diagram contrasting "Separation of Single Node Rancher Server and User Clusters" with "Separation of High-availability Rancher Server and User Clusters". In both cases Rancher users reach the Rancher server, either directly or through a load balancer in front of the Rancher server Kubernetes cluster, and the server manages separate user Kubernetes clusters.]
\ No newline at end of file diff --git a/static/img/rancher/rancher-architecture.svg b/static/img/rancher/rancher-architecture.svg index 412283bfb9a..402e2a1da89 100644 --- a/static/img/rancher/rancher-architecture.svg +++ b/static/img/rancher/rancher-architecture.svg @@ -1,2 +1,3 @@ + -
[removed SVG markup omitted: previous Rancher architecture diagram (Rancher UI/CLI/API and kubectl/K8s API through the auth proxy and Rancher API server, cluster controller, cluster agents, etcd, kubelets on RKE nodes and AWS EKS nodes) in which both downstream masters were labeled "RKE K8s Master".]
\ No newline at end of file +
[added SVG markup omitted: updated Rancher architecture diagram, identical to the previous one except that the master for the AWS EKS nodes is now labeled "EKS K8s Master".]
\ No newline at end of file diff --git a/static/img/rancher/rancher-storage.svg b/static/img/rancher/rancher-storage.svg new file mode 100644 index 00000000000..fd46adf9ef5 --- /dev/null +++ b/static/img/rancher/rancher-storage.svg @@ -0,0 +1,3 @@ + + +
[SVG markup omitted: Rancher storage diagram comparing "Setting Up Existing Persistent Volumes" and "Dynamically Provisioning New Persistent Volumes". In the existing-volume flow, a pod's persistent volume claim is matched by the Kubernetes master to a persistent volume backed by an existing AWS EBS volume. In the dynamic flow, a storage class creates persistent volumes on demand (backed by new AWS EBS volumes) and the Kubernetes master binds each new PV to the PVC it was created for. Persistent volumes and storage classes are cluster-level resources; the PVC allows the pod to consume PVs.]
\ No newline at end of file diff --git a/static/img/rancher/readiness-check-http.png b/static/img/rancher/readiness-check-http.png new file mode 100644 index 00000000000..1b2b19c2a75 Binary files /dev/null and b/static/img/rancher/readiness-check-http.png differ diff --git a/static/img/rancher/readiness-check-tcp.png b/static/img/rancher/readiness-check-tcp.png new file mode 100644 index 00000000000..0ba9869eb7c Binary files /dev/null and b/static/img/rancher/readiness-check-tcp.png differ diff --git a/static/img/rancher/readiness-check.png b/static/img/rancher/readiness-check.png new file mode 100644 index 00000000000..f978079aff7 Binary files /dev/null and b/static/img/rancher/readiness-check.png differ diff --git a/static/img/rancher/resolve-affinity.png b/static/img/rancher/resolve-affinity.png new file mode 100644 index 00000000000..d705a2c4fd8 Binary files /dev/null and b/static/img/rancher/resolve-affinity.png differ diff --git a/static/img/rancher/resolve-global.png b/static/img/rancher/resolve-global.png new file mode 100644 index 00000000000..583c500b8f6 Binary files /dev/null and b/static/img/rancher/resolve-global.png differ diff --git a/static/img/rancher/resolve-health-checks.png b/static/img/rancher/resolve-health-checks.png new file mode 100644 index 00000000000..3b7bfe282d1 Binary files /dev/null and b/static/img/rancher/resolve-health-checks.png differ diff --git a/static/img/rancher/resolve-links.png b/static/img/rancher/resolve-links.png new file mode 100644 index 00000000000..1f0544268f2 Binary files /dev/null and b/static/img/rancher/resolve-links.png differ diff --git a/static/img/rancher/resolve-load-balancer.png b/static/img/rancher/resolve-load-balancer.png new file mode 100644 index 00000000000..a03951098cf Binary files /dev/null and b/static/img/rancher/resolve-load-balancer.png differ diff --git a/static/img/rancher/resolve-pull-image.png b/static/img/rancher/resolve-pull-image.png new file mode 100644 index 00000000000..a822469d795 Binary files /dev/null and b/static/img/rancher/resolve-pull-image.png differ diff --git a/static/img/rancher/resolve-scale.png b/static/img/rancher/resolve-scale.png new file mode 100644 index 00000000000..5d36dec666a Binary files /dev/null and b/static/img/rancher/resolve-scale.png differ diff --git a/static/img/rancher/resource-constraint-settings.png b/static/img/rancher/resource-constraint-settings.png new file mode 100644 index 00000000000..68bf73cfc5d Binary files /dev/null and b/static/img/rancher/resource-constraint-settings.png differ diff --git a/static/img/rancher/schedule-specific-node.png b/static/img/rancher/schedule-specific-node.png new file mode 100644 index 00000000000..211bd90a190 Binary files /dev/null and b/static/img/rancher/schedule-specific-node.png differ diff --git a/static/img/rancher/scheduled-nodes.png b/static/img/rancher/scheduled-nodes.png new file mode 100644 index 00000000000..14807de68f8 Binary files /dev/null and b/static/img/rancher/scheduled-nodes.png differ diff --git a/static/img/rancher/separate-check.png b/static/img/rancher/separate-check.png new file mode 100644 index 00000000000..d094073c02e Binary files /dev/null and b/static/img/rancher/separate-check.png differ diff --git a/static/img/rancher/view-edit-yaml.png b/static/img/rancher/view-edit-yaml.png new file mode 100644 index 00000000000..36574ffa618 Binary files /dev/null and b/static/img/rancher/view-edit-yaml.png differ diff --git a/static/img/rancher/workload-scale.png b/static/img/rancher/workload-scale.png new file mode 100644 
index 00000000000..f8aa87a6d5c Binary files /dev/null and b/static/img/rancher/workload-scale.png differ diff --git a/static/img/rancher/workload-type-option.png b/static/img/rancher/workload-type-option.png new file mode 100644 index 00000000000..02c74e29a6e Binary files /dev/null and b/static/img/rancher/workload-type-option.png differ diff --git a/static/img/rancher/workload-type.png b/static/img/rancher/workload-type.png new file mode 100644 index 00000000000..cfa3493381d Binary files /dev/null and b/static/img/rancher/workload-type.png differ diff --git a/yarn.lock b/yarn.lock index 4a53584bf15..9f22028add4 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1866,10 +1866,22 @@ isstream@~0.1.2: resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= -jquery@^3.3.1: - version "3.3.1" - resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.3.1.tgz#958ce29e81c9790f31be7792df5d4d95fc57fbca" - integrity sha512-Ubldcmxp5np52/ENotGxlLe6aGMvmF4R8S6tZjsP6Knsaxd/xp3Zrh50cG93lR6nPXyUFwzN3ZSOQI0wRJNdGg== +izimodal@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/izimodal/-/izimodal-1.5.1.tgz#a49145030d8d9edfc60dfc35ae1758e4cf8502f1" + integrity sha512-DqqGUd741tAqJnWZMQRgixlgtSB6tb/HhfddmlsFWE5P7sckF2SmKVyyttpAdBdN5LUzQiF/R/+IjJw0TS5oTA== + dependencies: + jquery "~2" + +jquery@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.4.0.tgz#8de513fa0fa4b2c7d2e48a530e26f0596936efdf" + integrity sha512-ggRCXln9zEqv6OqAGXFEcshF5dSBvCkzj6Gm2gzuR5fWawaX8t7cxKVkkygKODrDAzKdoYw3l/e3pm3vlT4IbQ== + +jquery@~2: + version "2.2.4" + resolved "https://registry.yarnpkg.com/jquery/-/jquery-2.2.4.tgz#2c89d6889b5eac522a7eea32c14521559c6cbf02" + integrity sha1-LInWiJterFIqfuoywUUhVZxsvwI= js-levenshtein@^1.1.3: version "1.1.6" @@ -2023,9 +2035,9 @@ lodash.sortby@^4.7.0: integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= lodash@^4.13.1, lodash@^4.17.10, lodash@^4.17.5: - version "4.17.11" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d" - integrity sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg== + version "4.17.15" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" + integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== loose-envify@^1.0.0: version "1.4.0" @@ -2034,6 +2046,11 @@ loose-envify@^1.0.0: dependencies: js-tokens "^3.0.0 || ^4.0.0" +lory.js@^2.4.1: + version "2.5.3" + resolved "https://registry.yarnpkg.com/lory.js/-/lory.js-2.5.3.tgz#157d6bcf64105d56b1fdad763940e79912db19b4" + integrity sha512-9FKuaeLtSKupM9BNmcCY0W31yhloZv2vEMD/v0hnwsdajqzb8bQacD5ZxZw+WUD0dRAXM+qx65Vk1m++4qkcsQ== + map-age-cleaner@^0.1.1: version "0.1.3" resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" @@ -2147,9 +2164,9 @@ minizlib@^1.1.1: minipass "^2.2.1" mixin-deep@^1.2.0: - version "1.3.1" - resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.1.tgz#a49e7268dce1a0d9698e45326c5626df3543d0fe" - integrity sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ== + version "1.3.2" + resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566" + integrity 
sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA== dependencies: for-in "^1.0.2" is-extendable "^1.0.1" @@ -2166,7 +2183,14 @@ mkdirp@^0.5.0, mkdirp@^0.5.1: dependencies: minimist "0.0.8" -moment@^2.20.1: +moment-timezone@^0.5.26: + version "0.5.27" + resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.27.tgz#73adec8139b6fe30452e78f210f27b1f346b8877" + integrity sha512-EIKQs7h5sAsjhPCqN6ggx6cEbs94GK050254TIJySD1bzoM5JTYDwAU1IoVOeTOL6Gm27kYJ51/uuvq1kIlrbw== + dependencies: + moment ">= 2.9.0" + +"moment@>= 2.9.0", moment@^2.20.1: version "2.24.0" resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b" integrity sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg==