diff --git a/static/img/os/Rancher_aws1.png b/assets/img/os/Rancher_aws1.png similarity index 100% rename from static/img/os/Rancher_aws1.png rename to assets/img/os/Rancher_aws1.png diff --git a/static/img/os/Rancher_aws2.png b/assets/img/os/Rancher_aws2.png similarity index 100% rename from static/img/os/Rancher_aws2.png rename to assets/img/os/Rancher_aws2.png diff --git a/static/img/os/Rancher_aws3.png b/assets/img/os/Rancher_aws3.png similarity index 100% rename from static/img/os/Rancher_aws3.png rename to assets/img/os/Rancher_aws3.png diff --git a/static/img/os/Rancher_aws4.png b/assets/img/os/Rancher_aws4.png similarity index 100% rename from static/img/os/Rancher_aws4.png rename to assets/img/os/Rancher_aws4.png diff --git a/static/img/os/Rancher_aws5.png b/assets/img/os/Rancher_aws5.png similarity index 100% rename from static/img/os/Rancher_aws5.png rename to assets/img/os/Rancher_aws5.png diff --git a/static/img/os/Rancher_aws6.png b/assets/img/os/Rancher_aws6.png similarity index 100% rename from static/img/os/Rancher_aws6.png rename to assets/img/os/Rancher_aws6.png diff --git a/static/img/os/Rancher_busydash.png b/assets/img/os/Rancher_busydash.png similarity index 100% rename from static/img/os/Rancher_busydash.png rename to assets/img/os/Rancher_busydash.png diff --git a/static/img/os/rancheroshowitworks.png b/assets/img/os/rancheroshowitworks.png similarity index 100% rename from static/img/os/rancheroshowitworks.png rename to assets/img/os/rancheroshowitworks.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-1.png b/assets/img/rancher/adfs/adfs-add-rpt-1.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-1.png rename to assets/img/rancher/adfs/adfs-add-rpt-1.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-10.png b/assets/img/rancher/adfs/adfs-add-rpt-10.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-10.png rename to assets/img/rancher/adfs/adfs-add-rpt-10.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-11.png b/assets/img/rancher/adfs/adfs-add-rpt-11.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-11.png rename to assets/img/rancher/adfs/adfs-add-rpt-11.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-2.png b/assets/img/rancher/adfs/adfs-add-rpt-2.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-2.png rename to assets/img/rancher/adfs/adfs-add-rpt-2.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-3.png b/assets/img/rancher/adfs/adfs-add-rpt-3.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-3.png rename to assets/img/rancher/adfs/adfs-add-rpt-3.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-4.png b/assets/img/rancher/adfs/adfs-add-rpt-4.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-4.png rename to assets/img/rancher/adfs/adfs-add-rpt-4.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-5.png b/assets/img/rancher/adfs/adfs-add-rpt-5.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-5.png rename to assets/img/rancher/adfs/adfs-add-rpt-5.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-6.png b/assets/img/rancher/adfs/adfs-add-rpt-6.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-6.png rename to assets/img/rancher/adfs/adfs-add-rpt-6.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-7.png b/assets/img/rancher/adfs/adfs-add-rpt-7.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-7.png rename to 
assets/img/rancher/adfs/adfs-add-rpt-7.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-8.png b/assets/img/rancher/adfs/adfs-add-rpt-8.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-8.png rename to assets/img/rancher/adfs/adfs-add-rpt-8.png diff --git a/static/img/rancher/adfs/adfs-add-rpt-9.png b/assets/img/rancher/adfs/adfs-add-rpt-9.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-rpt-9.png rename to assets/img/rancher/adfs/adfs-add-rpt-9.png diff --git a/static/img/rancher/adfs/adfs-add-tcr-1.png b/assets/img/rancher/adfs/adfs-add-tcr-1.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-tcr-1.png rename to assets/img/rancher/adfs/adfs-add-tcr-1.png diff --git a/static/img/rancher/adfs/adfs-add-tcr-2.png b/assets/img/rancher/adfs/adfs-add-tcr-2.png similarity index 100% rename from static/img/rancher/adfs/adfs-add-tcr-2.png rename to assets/img/rancher/adfs/adfs-add-tcr-2.png diff --git a/static/img/rancher/adfs/adfs-edit-cr.png b/assets/img/rancher/adfs/adfs-edit-cr.png similarity index 100% rename from static/img/rancher/adfs/adfs-edit-cr.png rename to assets/img/rancher/adfs/adfs-edit-cr.png diff --git a/static/img/rancher/adfs/adfs-overview.png b/assets/img/rancher/adfs/adfs-overview.png similarity index 100% rename from static/img/rancher/adfs/adfs-overview.png rename to assets/img/rancher/adfs/adfs-overview.png diff --git a/static/img/rancher/airgap/edit-system-default-registry.png b/assets/img/rancher/airgap/edit-system-default-registry.png similarity index 100% rename from static/img/rancher/airgap/edit-system-default-registry.png rename to assets/img/rancher/airgap/edit-system-default-registry.png diff --git a/static/img/rancher/airgap/enter-system-default-registry.png b/assets/img/rancher/airgap/enter-system-default-registry.png similarity index 100% rename from static/img/rancher/airgap/enter-system-default-registry.png rename to assets/img/rancher/airgap/enter-system-default-registry.png diff --git a/static/img/rancher/airgap/privateregistry.svg b/assets/img/rancher/airgap/privateregistry.svg similarity index 100% rename from static/img/rancher/airgap/privateregistry.svg rename to assets/img/rancher/airgap/privateregistry.svg diff --git a/static/img/rancher/airgap/privateregistrypushpull.svg b/assets/img/rancher/airgap/privateregistrypushpull.svg similarity index 100% rename from static/img/rancher/airgap/privateregistrypushpull.svg rename to assets/img/rancher/airgap/privateregistrypushpull.svg diff --git a/static/img/rancher/airgap/settings.png b/assets/img/rancher/airgap/settings.png similarity index 100% rename from static/img/rancher/airgap/settings.png rename to assets/img/rancher/airgap/settings.png diff --git a/static/img/rancher/airgap/system-charts-setting.png b/assets/img/rancher/airgap/system-charts-setting.png similarity index 100% rename from static/img/rancher/airgap/system-charts-setting.png rename to assets/img/rancher/airgap/system-charts-setting.png diff --git a/static/img/rancher/airgap/system-charts-update.png b/assets/img/rancher/airgap/system-charts-update.png similarity index 100% rename from static/img/rancher/airgap/system-charts-update.png rename to assets/img/rancher/airgap/system-charts-update.png diff --git a/static/img/rancher/bpg/hub-and-spoke.png b/assets/img/rancher/bpg/hub-and-spoke.png similarity index 100% rename from static/img/rancher/bpg/hub-and-spoke.png rename to assets/img/rancher/bpg/hub-and-spoke.png diff --git a/static/img/rancher/bpg/regional.png 
b/assets/img/rancher/bpg/regional.png similarity index 100% rename from static/img/rancher/bpg/regional.png rename to assets/img/rancher/bpg/regional.png diff --git a/static/img/rancher/bulk-key-values.gif b/assets/img/rancher/bulk-key-values.gif similarity index 100% rename from static/img/rancher/bulk-key-values.gif rename to assets/img/rancher/bulk-key-values.gif diff --git a/static/img/rancher/canal-diagram.png b/assets/img/rancher/canal-diagram.png similarity index 100% rename from static/img/rancher/canal-diagram.png rename to assets/img/rancher/canal-diagram.png diff --git a/static/img/rancher/globalpermissionrole.png b/assets/img/rancher/globalpermissionrole.png similarity index 100% rename from static/img/rancher/globalpermissionrole.png rename to assets/img/rancher/globalpermissionrole.png diff --git a/static/img/rancher/globalpermissionuser.png b/assets/img/rancher/globalpermissionuser.png similarity index 100% rename from static/img/rancher/globalpermissionuser.png rename to assets/img/rancher/globalpermissionuser.png diff --git a/static/img/rancher/ha/nlb/add-targets-targetgroup-443.png b/assets/img/rancher/ha/nlb/add-targets-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/add-targets-targetgroup-443.png rename to assets/img/rancher/ha/nlb/add-targets-targetgroup-443.png diff --git a/static/img/rancher/ha/nlb/added-targets-targetgroup-443.png b/assets/img/rancher/ha/nlb/added-targets-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/added-targets-targetgroup-443.png rename to assets/img/rancher/ha/nlb/added-targets-targetgroup-443.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-443-advanced.png b/assets/img/rancher/ha/nlb/create-targetgroup-443-advanced.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-443-advanced.png rename to assets/img/rancher/ha/nlb/create-targetgroup-443-advanced.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-443.png b/assets/img/rancher/ha/nlb/create-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-443.png rename to assets/img/rancher/ha/nlb/create-targetgroup-443.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-80-advanced.png b/assets/img/rancher/ha/nlb/create-targetgroup-80-advanced.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-80-advanced.png rename to assets/img/rancher/ha/nlb/create-targetgroup-80-advanced.png diff --git a/static/img/rancher/ha/nlb/create-targetgroup-80.png b/assets/img/rancher/ha/nlb/create-targetgroup-80.png similarity index 100% rename from static/img/rancher/ha/nlb/create-targetgroup-80.png rename to assets/img/rancher/ha/nlb/create-targetgroup-80.png diff --git a/static/img/rancher/ha/nlb/ec2-loadbalancing.png b/assets/img/rancher/ha/nlb/ec2-loadbalancing.png similarity index 100% rename from static/img/rancher/ha/nlb/ec2-loadbalancing.png rename to assets/img/rancher/ha/nlb/ec2-loadbalancing.png diff --git a/static/img/rancher/ha/nlb/edit-targetgroup-443.png b/assets/img/rancher/ha/nlb/edit-targetgroup-443.png similarity index 100% rename from static/img/rancher/ha/nlb/edit-targetgroup-443.png rename to assets/img/rancher/ha/nlb/edit-targetgroup-443.png diff --git a/static/img/rancher/ldapsearch-group.png b/assets/img/rancher/ldapsearch-group.png similarity index 100% rename from static/img/rancher/ldapsearch-group.png rename to assets/img/rancher/ldapsearch-group.png diff --git 
a/static/img/rancher/ldapsearch-user.png b/assets/img/rancher/ldapsearch-user.png similarity index 100% rename from static/img/rancher/ldapsearch-user.png rename to assets/img/rancher/ldapsearch-user.png diff --git a/assets/img/rancher/rancher_overview.png b/assets/img/rancher/rancher_overview.png new file mode 100644 index 00000000000..c445fec3710 Binary files /dev/null and b/assets/img/rancher/rancher_overview.png differ diff --git a/assets/img/rancher/rancher_overview_2.png b/assets/img/rancher/rancher_overview_2.png new file mode 100644 index 00000000000..00ce8eb2c27 Binary files /dev/null and b/assets/img/rancher/rancher_overview_2.png differ diff --git a/static/img/rancher/rancherroles1.png b/assets/img/rancher/rancherroles1.png similarity index 100% rename from static/img/rancher/rancherroles1.png rename to assets/img/rancher/rancherroles1.png diff --git a/static/img/rancher/rancheruser.png b/assets/img/rancher/rancheruser.png similarity index 100% rename from static/img/rancher/rancheruser.png rename to assets/img/rancher/rancheruser.png diff --git a/static/img/rancher/set-hostport.gif b/assets/img/rancher/set-hostport.gif similarity index 100% rename from static/img/rancher/set-hostport.gif rename to assets/img/rancher/set-hostport.gif diff --git a/static/img/rancher/set-nodeport.gif b/assets/img/rancher/set-nodeport.gif similarity index 100% rename from static/img/rancher/set-nodeport.gif rename to assets/img/rancher/set-nodeport.gif diff --git a/static/img/rancher/vsphere-cluster-create-1.png b/assets/img/rancher/vsphere-cluster-create-1.png similarity index 100% rename from static/img/rancher/vsphere-cluster-create-1.png rename to assets/img/rancher/vsphere-cluster-create-1.png diff --git a/static/img/rancher/vsphere-node-driver-cloudprovider.png b/assets/img/rancher/vsphere-node-driver-cloudprovider.png similarity index 100% rename from static/img/rancher/vsphere-node-driver-cloudprovider.png rename to assets/img/rancher/vsphere-node-driver-cloudprovider.png diff --git a/static/img/rancher/vsphere-node-template-1.png b/assets/img/rancher/vsphere-node-template-1.png similarity index 100% rename from static/img/rancher/vsphere-node-template-1.png rename to assets/img/rancher/vsphere-node-template-1.png diff --git a/static/img/rancher/vsphere-node-template-2.png b/assets/img/rancher/vsphere-node-template-2.png similarity index 100% rename from static/img/rancher/vsphere-node-template-2.png rename to assets/img/rancher/vsphere-node-template-2.png diff --git a/static/img/rancher/vsphere-storage-class.png b/assets/img/rancher/vsphere-storage-class.png similarity index 100% rename from static/img/rancher/vsphere-storage-class.png rename to assets/img/rancher/vsphere-storage-class.png diff --git a/static/img/rancher/workload-add-volume.png b/assets/img/rancher/workload-add-volume.png similarity index 100% rename from static/img/rancher/workload-add-volume.png rename to assets/img/rancher/workload-add-volume.png diff --git a/static/img/rke/rke-etcd-backup.png b/assets/img/rke/rke-etcd-backup.png similarity index 100% rename from static/img/rke/rke-etcd-backup.png rename to assets/img/rke/rke-etcd-backup.png diff --git a/static/img/rke/vsphere-advanced-parameters.png b/assets/img/rke/vsphere-advanced-parameters.png similarity index 100% rename from static/img/rke/vsphere-advanced-parameters.png rename to assets/img/rke/vsphere-advanced-parameters.png diff --git a/static/img/rke/vsphere-nodedriver-enable-uuid.png b/assets/img/rke/vsphere-nodedriver-enable-uuid.png similarity index 100% 
rename from static/img/rke/vsphere-nodedriver-enable-uuid.png rename to assets/img/rke/vsphere-nodedriver-enable-uuid.png diff --git a/config.toml b/config.toml index 67d8a56f732..4cdea8ac743 100644 --- a/config.toml +++ b/config.toml @@ -5,6 +5,7 @@ title = "Rancher Labs" theme = "rancher-website-theme" themesDir = "node_modules" pluralizeListTitles = false +timeout = 30000 enableRobotsTXT = true pygmentsCodeFences = true diff --git a/content/k3s/latest/en/_index.md b/content/k3s/latest/en/_index.md index 6035751057e..85d522e5e37 100644 --- a/content/k3s/latest/en/_index.md +++ b/content/k3s/latest/en/_index.md @@ -1,6 +1,6 @@ --- -title: "K3S - 5 less than k8s" -shortTitle: K3S +title: "K3s - 5 less than K8s" +shortTitle: K3s date: 2019-02-05T09:52:46-07:00 name: "menu" --- @@ -18,18 +18,14 @@ Great for: What is this? --- -k3s is intended to be a fully compliant Kubernetes distribution with the following changes: +K3s is a fully compliant Kubernetes distribution with the following enhancements: -1. Legacy, alpha, non-default features are removed. Hopefully, you shouldn't notice the - stuff that has been removed. -2. Removed most in-tree plugins (cloud providers and storage plugins) which can be replaced - with out of tree addons. -3. Add sqlite3 as the default storage mechanism. etcd3 is still available, but not the default. -4. Wrapped in simple launcher that handles a lot of the complexity of TLS and options. -5. Minimal to no OS dependencies (just a sane kernel and cgroup mounts needed). k3s packages required - dependencies +* An embedded SQLite database has replaced etcd as the default datastore. External datastores such as PostgreSQL, MySQL, and etcd are also supported. +* Simple but powerful "batteries-included" features have been added, such as: a local storage provider, a service load balancer, a helm controller, and the Traefik ingress controller. +* Operation of all Kubernetes control plane components is encapsulated in a single binary and process. This allows K3s to automate and manage complex cluster operations like distributing certificates. +* In-tree cloud providers and storage plugins have been removed. +* External dependencies have been minimized (just a modern kernel and cgroup mounts needed). K3s packages required dependencies, including: * containerd * Flannel * CoreDNS - * CNI * Host utilities (iptables, socat, etc) diff --git a/content/k3s/latest/en/advanced/_index.md b/content/k3s/latest/en/advanced/_index.md new file mode 100644 index 00000000000..426a40d4afd --- /dev/null +++ b/content/k3s/latest/en/advanced/_index.md @@ -0,0 +1,106 @@ +--- +title: "Advanced Options" +weight: 40 +aliases: + - /k3s/latest/en/running/ +--- + +This section contains advanced information describing the different ways you can run and manage K3s. + +Starting the Server +------------------ + +The installation script will auto-detect if your OS is using systemd or openrc and start the service. +When running with openrc logs will be created at `/var/log/k3s.log`, or with systemd in `/var/log/syslog` and viewed using `journalctl -u k3s`. 
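For example, to check the logs on a running node (a small illustration of the locations and command described above):

```bash
# On a systemd-based OS, view the K3s service logs
journalctl -u k3s

# On an openrc-based OS, K3s logs are written to a file
tail -f /var/log/k3s.log
```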
An example of installing and auto-starting with the install script: + +```bash +curl -sfL https://get.k3s.io | sh - +``` + +When running the server manually you should get an output similar to: + +``` +$ k3s server +INFO[2019-01-22T15:16:19.908493986-07:00] Starting k3s dev +INFO[2019-01-22T15:16:19.908934479-07:00] Running kube-apiserver --allow-privileged=true --authorization-mode Node,RBAC --service-account-signing-key-file /var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range 10.43.0.0/16 --advertise-port 6445 --advertise-address 127.0.0.1 --insecure-port 0 --secure-port 6444 --bind-address 127.0.0.1 --tls-cert-file /var/lib/rancher/k3s/server/tls/localhost.crt --tls-private-key-file /var/lib/rancher/k3s/server/tls/localhost.key --service-account-key-file /var/lib/rancher/k3s/server/tls/service.key --service-account-issuer k3s --api-audiences unknown --basic-auth-file /var/lib/rancher/k3s/server/cred/passwd --kubelet-client-certificate /var/lib/rancher/k3s/server/tls/token-node.crt --kubelet-client-key /var/lib/rancher/k3s/server/tls/token-node.key +Flag --insecure-port has been deprecated, This flag will be removed in a future version. +INFO[2019-01-22T15:16:20.196766005-07:00] Running kube-scheduler --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --port 0 --secure-port 0 --leader-elect=false +INFO[2019-01-22T15:16:20.196880841-07:00] Running kube-controller-manager --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --service-account-private-key-file /var/lib/rancher/k3s/server/tls/service.key --allocate-node-cidrs --cluster-cidr 10.42.0.0/16 --root-ca-file /var/lib/rancher/k3s/server/tls/token-ca.crt --port 0 --secure-port 0 --leader-elect=false +Flag --port has been deprecated, see --secure-port instead. +INFO[2019-01-22T15:16:20.273441984-07:00] Listening on :6443 +INFO[2019-01-22T15:16:20.278383446-07:00] Writing manifest: /var/lib/rancher/k3s/server/manifests/coredns.yaml +INFO[2019-01-22T15:16:20.474454524-07:00] Node token is available at /var/lib/rancher/k3s/server/node-token +INFO[2019-01-22T15:16:20.474471391-07:00] To join node to cluster: k3s agent -s https://10.20.0.3:6443 -t ${NODE_TOKEN} +INFO[2019-01-22T15:16:20.541027133-07:00] Wrote kubeconfig /etc/rancher/k3s/k3s.yaml +INFO[2019-01-22T15:16:20.541049100-07:00] Run: k3s kubectl +``` + +The output will likely be much longer as the agent will create a lot of logs. By default the server +will register itself as a node (run the agent). + +Alpine Linux +------------ + +In order to pre-setup Alpine Linux you have to go through the following steps: + +```bash +echo "cgroup /sys/fs/cgroup cgroup defaults 0 0" >> /etc/fstab + +cat >> /etc/cgconfig.conf < 11s v1.13.2-k3s2 + d54c8b17c055 Ready 11s v1.13.2-k3s2 + db7a5a5a5bdd Ready 12s v1.13.2-k3s2 + +To run the agent only in Docker, use `docker-compose up node`. Alternatively the Docker run command can also be used; + + sudo docker run \ + -d --tmpfs /run \ + --tmpfs /var/run \ + -e K3S_URL=${SERVER_URL} \ + -e K3S_TOKEN=${NODE_TOKEN} \ + --privileged rancher/k3s:vX.Y.Z + diff --git a/content/k3s/latest/en/building/_index.md b/content/k3s/latest/en/building/_index.md deleted file mode 100644 index e6afe642911..00000000000 --- a/content/k3s/latest/en/building/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Building from Source" -weight: 10 ---- - -This section provides information on building k3s from source. - -See the [release](https://github.com/rancher/k3s/releases/latest) page for pre-built releases. 
- -The clone will be much faster on this repo if you do - - git clone --depth 1 https://github.com/rancher/k3s.git - -This repo includes all of Kubernetes history so `--depth 1` will avoid most of that. - -To build the full release binary run `make` and that will create `./dist/artifacts/k3s`. - -Optionally to build the binaries without running linting or building docker images: -```sh -./scripts/download && ./scripts/build && ./scripts/package-cli -``` - -For development, you just need go 1.12 and a sane GOPATH. To compile the binaries run: -```bash -go build -o k3s -go build -o kubectl ./cmd/kubectl -go build -o hyperkube ./vendor/k8s.io/kubernetes/cmd/hyperkube -``` - -This will create the main executable, but it does not include the dependencies like containerd, CNI, -etc. To run a server and agent with all the dependencies for development run the following -helper scripts: -```bash -# Server -./scripts/dev-server.sh - -# Agent -./scripts/dev-agent.sh -``` - - -Kubernetes Source ------------------ - -The source code for Kubernetes is in `vendor/` and the location from which that is copied -is in `./vendor.conf`. Go to the referenced repo/tag and you'll find all the patches applied -to upstream Kubernetes. diff --git a/content/k3s/latest/en/configuration/_index.md b/content/k3s/latest/en/configuration/_index.md index bc3d785cf0b..1f3cc7d7bf8 100644 --- a/content/k3s/latest/en/configuration/_index.md +++ b/content/k3s/latest/en/configuration/_index.md @@ -1,9 +1,9 @@ --- title: "Configuration Info" -weight: 4 +weight: 50 --- -This section contains information on using k3s with various configurations. +This section contains information on using K3s with various configurations. Auto-Deploying Manifests @@ -12,7 +12,7 @@ Auto-Deploying Manifests Any file found in `/var/lib/rancher/k3s/server/manifests` will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. -It is also possible to deploy Helm charts. k3s supports a CRD controller for installing charts. A YAML file specification can look as following (example taken from `/var/lib/rancher/k3s/server/manifests/traefik.yaml`): +It is also possible to deploy Helm charts. K3s supports a CRD controller for installing charts. A YAML file specification can look as following (example taken from `/var/lib/rancher/k3s/server/manifests/traefik.yaml`): ```yaml apiVersion: helm.cattle.io/v1 @@ -27,7 +27,7 @@ spec: ssl.enabled: "true" ``` -Keep in mind that `namespace` in your HelmChart resource metadata section should always be `kube-system`, because k3s deploy controller is configured to watch this namespace for new HelmChart resources. If you want to specify the namespace for the actual helm release, you can do that using `targetNamespace` key in the spec section: +Keep in mind that `namespace` in your HelmChart resource metadata section should always be `kube-system`, because the K3s deploy controller is configured to watch this namespace for new HelmChart resources. If you want to specify the namespace for the actual helm release, you can do that using `targetNamespace` key in the spec section: ``` apiVersion: helm.cattle.io/v1 @@ -53,51 +53,68 @@ spec: Also note that besides `set` you can use `valuesContent` in the spec section. And it's okay to use both of them. -k3s versions <= v0.5.0 used `k3s.cattle.io` for the api group of helmcharts, this has been changed to `helm.cattle.io` for later versions. 
+K3s versions `<= v0.5.0` used `k3s.cattle.io` for the api group of helmcharts, this has been changed to `helm.cattle.io` for later versions. + +Using the helm CRD +--------------------- + +You can deploy a 3rd party helm chart using an example like this: + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: nginx + namespace: kube-system +spec: + chart: nginx + repo: https://charts.bitnami.com/bitnami + targetNamespace: default +``` + +You can install a specific version of a helm chart using an example like this: + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: stable/nginx-ingress + namespace: kube-system +spec: + chart: nginx-ingress + version: 1.24.4 + targetNamespace: default +``` Accessing Cluster from Outside ----------------------------- Copy `/etc/rancher/k3s/k3s.yaml` on your machine located outside the cluster as `~/.kube/config`. Then replace -"localhost" with the IP or name of your k3s server. `kubectl` can now manage your k3s cluster. - -Open Ports / Network Security ---------------------------- - -The server needs port 6443 to be accessible by the nodes. The nodes need to be able to reach -other nodes over UDP port 8472. The nodes also need to be able to reach the server on UDP port 8472. This is used for flannel VXLAN. If you don't use flannel -and provide your own custom CNI, then 8472 is not needed by k3s. The node should not listen -on any other port. k3s uses reverse tunneling such that the nodes make outbound connections -to the server and all kubelet traffic runs through that tunnel. - -IMPORTANT. The VXLAN port on nodes should not be exposed to the world, it opens up your -cluster network to accessed by anyone. Run your nodes behind a firewall/security group that -disables access to port 8472. +"localhost" with the IP or name of your K3s server. `kubectl` can now manage your K3s cluster. Node Registration ----------------- Agents will register with the server using the node cluster secret along with a randomly generated -password for the node, stored at `/var/lib/rancher/k3s/agent/node-password.txt`. The server will +password for the node, stored at `/etc/rancher/node/password`. The server will store the passwords for individual nodes at `/var/lib/rancher/k3s/server/cred/node-passwd`, and any -subsequent attempts must use the same password. If the data directory of an agent is removed the -password file should be recreated for the agent, or the entry removed from the server. +subsequent attempts must use the same password. If the `/etc/rancher/node` directory of an agent is removed the +password file should be recreated for the agent, or the entry removed from the server. A unique node +id can be appended to the hostname by launching k3s servers or agents using the `--with-node-id` flag. Containerd and Docker ---------- -k3s includes and defaults to containerd. Why? Because it's just plain better. If you want to -run with Docker first stop and think, "Really? Do I really want more headache?" If still -yes then you just need to run the agent with the `--docker` flag. +K3s includes and defaults to containerd. If you want to use Docker instead of containerd then you simply need to run the agent with the `--docker` flag. -k3s will generate config.toml for containerd in `/var/lib/rancher/k3s/agent/etc/containerd/config.toml`, for advanced customization for this file you can create another file called `config.toml.tmpl` in the same directory and it will be used instead. 
+K3s will generate a config.toml for containerd in `/var/lib/rancher/k3s/agent/etc/containerd/config.toml`. For advanced customization of this file, you can create another file called `config.toml.tmpl` in the same directory and it will be used instead. The `config.toml.tmpl` file will be treated as a Go template, and the `config.Node` structure is passed to the template. The following is an example of how to use the structure to customize the configuration file: https://github.com/rancher/k3s/blob/master/pkg/agent/templates/templates.go#L16-L32 -Rootless +Rootless (Experimental) -------- -_**WARNING**:_ Some advanced magic, user beware +_**WARNING**:_ Experimental feature Initial rootless support has been added but there are a series of significant usability issues surrounding it. We are releasing the initial support for those interested in rootless and hopefully some people can help to @@ -110,9 +127,9 @@ In short, latest Ubuntu is your best bet for this to work. * **Ports** - When running rootless a new network namespace is created. This means that k3s instance is running with networking - fairly detached from the host. The only way to access services run in k3s from the host is to setup port forwards - to the k3s network namespace. We have a controller that will automatically bind 6443 and service port below 1024 to the host with an offset of 10000. + When running rootless, a new network namespace is created. This means that the K3s instance is running with networking + fairly detached from the host. The only way to access services run in K3s from the host is to set up port forwards + to the K3s network namespace. We have a controller that will automatically bind 6443 and service ports below 1024 to the host with an offset of 10000. That means service port 80 will become 10080 on the host, but 8080 will become 8080 without any offset. @@ -120,7 +137,7 @@ In short, latest Ubuntu is your best bet for this to work. * **Daemon lifecycle** - Once you kill k3s and then start a new instance of k3s it will create a new network namespace, but it doesn't kill the old pods. So you are left + Once you kill K3s and then start a new instance of K3s it will create a new network namespace, but it doesn't kill the old pods. So you are left with a fairly broken setup. This is the main issue at the moment, how to deal with the network namespace. The issue is tracked in https://github.com/rootless-containers/rootlesskit/issues/65 @@ -133,155 +150,16 @@ In short, latest Ubuntu is your best bet for this to work. Just add `--rootless` flag to either server or agent. So run `k3s server --rootless` and then look for the message `Wrote kubeconfig [SOME PATH]` for where your kubeconfig to access your cluster is. Be careful, if you use `-o` to write -the kubeconfig to a different directory it will probably not work. This is because the k3s instance in running in a different +the kubeconfig to a different directory it will probably not work. This is because the K3s instance is running in a different mount namespace. Node Labels and Taints ---------------------- -k3s agents can be configured with options `--node-label` and `--node-taint` which adds set of Labels and Taints to kubelet, the two options only adds labels/taints at registration time, so they can only be added once and not changed after that, an example of options to add new label is: +K3s agents can be configured with the options `--node-label` and `--node-taint` which add a label and taint to the kubelet. 
The two options only add labels and/or taints at registration time, so they can only be added once and not changed after that again by running K3s. If you want to change node labels and taints after node registration you should use `kubectl`. Below is an example showing how to add labels and a taint: ``` --node-label foo=bar \ --node-label hello=world \ --node-taint key1=value1:NoExecute ``` -Flannel -------- - -Flannel is included by default, if you don't want flannel then run the agent with `--no-flannel` option. - -In this setup you will still be required to install your own CNI driver. More info [here](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network) - -CoreDNS -------- - -CoreDNS is deployed on start of the agent, to disable run the server with the `--no-deploy coredns` option. - -If you don't install CoreDNS you will need to install a cluster DNS provider yourself. - -Traefik -------- - -Traefik is deployed by default when starting the server; to disable it, start the server with the `--no-deploy traefik` option. - -Service Load Balancer ---------------------- - -k3s includes a basic service load balancer that uses available host ports. If you try to create -a load balancer that listens on port 80, for example, it will try to find a free host in the cluster -for port 80. If no port is available the load balancer will stay in Pending. - -To disable the embedded load balancer run the server with the `--no-deploy servicelb` option. This is necessary if you wish to run a different load balancer, such as MetalLB. - -Metrics Server --------------- - -To add functionality for commands such as `k3s kubectl top nodes` metrics-server must be installed, -to install see the instructions located at https://github.com/kubernetes-incubator/metrics-server/. - -**NOTE** : By default the image used in `metrics-server-deployment.yaml` is valid only for **amd64** devices, -this should be edited as appropriate for your architecture. As of this writing metrics-server provides -the following images relevant to k3s: `amd64:v0.3.3`, `arm64:v0.3.2`, and `arm:v0.3.2`. Further information -on the images provided through gcr.io can be found at https://console.cloud.google.com/gcr/images/google-containers/GLOBAL. - -Storage Backends ----------------- - -As of version 0.6.0, k3s can support various storage backends including: SQLite (default), MySQL, Postgres, and etcd, this enhancement depends on the following arguments that can be passed to k3s server: - -* `--storage-backend` _value_ - - Specify storage type etcd3 or kvsql [$`K3S_STORAGE_BACKEND`] - -* `--storage-endpoint` _value_ - - Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$`K3S_STORAGE_ENDPOINT`] - -* `--storage-cafile` _value_ - - SSL Certificate Authority file used to secure storage backend communication [$`K3S_STORAGE_CAFILE`] - -* `--storage-certfile` _value_ - - SSL certification file used to secure storage backend communication [$`K3S_STORAGE_CERTFILE`] - -* `--storage-keyfile` _value_ - - SSL key file used to secure storage backend communication [$`K3S_STORAGE_KEYFILE`] - -### MySQL - -To use k3s with MySQL storage backend, you can specify the following for insecure connection: - -``` - --storage-endpoint="mysql://" -``` -By default the server will attempt to connect to mysql using the mysql socket at `/var/run/mysqld/mysqld.sock` using the root user and with no password, k3s will also create a database with the name `kubernetes` if the database is not specified in the DSN. 
- -To override the method of connection, user/pass, and database name, you can provide a custom DSN, for example: - -``` - --storage-endpoint="mysql://k3suser:k3spass@tcp(192.168.1.100:3306)/k3stest" -``` - -This command will attempt to connect to MySQL on host `192.168.1.100` on port `3306` with username `k3suser` and password `k3spass` and k3s will automatically create a new database with the name `k3stest` if it doesn't exist, for more information about the MySQL driver data source name, please refer to https://github.com/go-sql-driver/mysql#dsn-data-source-name - -To connect to MySQL securely, you can use the following example: -``` - --storage-endpoint="mysql://k3suser:k3spass@tcp(192.168.1.100:3306)/k3stest" \ - --storage-cafile ca.crt \ - --storage-certfile mysql.crt \ - --storage-keyfile mysql.key -``` -The above command will use these certificates to generate the tls config to communicate with mysql securely. - - -### Postgres - -Connection to postgres can be established using the following command: - -``` - --storage-endpoint="postgres://" -``` - -By default the server will attempt to connect to postgres on localhost with using the `postgres` user and with `postgres` password, k3s will also create a database with the name `kubernetes` if the database is not specified in the DSN. - -To override the method of connection, user/pass, and database name, you can provide a custom DSN, for example: - -``` - --storage-endpoint="postgres://k3suser:k3spass@192.168.1.100:5432/k3stest" -``` - -This command will attempt to connect to Postgres on host `192.168.1.100` on port `5432` with username `k3suser` and password `k3spass` and k3s will automatically create a new database with the name `k3stest` if it doesn't exist, for more information about the Postgres driver data source name, please refer to https://godoc.org/github.com/lib/pq - -To connect to Postgres securely, you can use the following example: - -``` - --storage-endpoint="postgres://k3suser:k3spass@192.168.1.100:5432/k3stest" \ - --storage-certfile postgres.crt \ - --storage-keyfile postgres.key \ - --storage-cafile ca.crt -``` - -The above command will use these certificates to generate the tls config to communicate with postgres securely. - -### etcd - -Connection to etcd3 can be established using the following command: - -``` - --storage-backend=etcd3 \ - --storage-endpoint="https://127.0.0.1:2379" -``` -The above command will attempt to connect insecurely to etcd on localhost with port `2379`, you can connect securely to etcd using the following command: - -``` - --storage-backend=etcd3 \ - --storage-endpoint="https://127.0.0.1:2379" \ - --storage-cafile ca.crt \ - --storage-certfile etcd.crt \ - --storage-keyfile etcd.key -``` - -The above command will use these certificates to generate the tls config to communicate with etcd securely. diff --git a/content/k3s/latest/en/faq/_index.md b/content/k3s/latest/en/faq/_index.md new file mode 100644 index 00000000000..9de75aa8645 --- /dev/null +++ b/content/k3s/latest/en/faq/_index.md @@ -0,0 +1,22 @@ +--- +title: FAQ +weight: 60 +--- + +The FAQ is updated periodically and designed to answer the questions our users most frequently ask about K3s. + +**Is K3s a suitable replacement for k8s?** + +K3s is capable of nearly everything k8s can do. It is just a more lightweight version. See the [main]({{}}/k3s/latest/en/) docs page for more details. + +**How can I use my own Ingress instead of Traefik?** + +Simply start K3s server with `--no-deploy=traefik` and deploy your ingress. 
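For example, a minimal sketch using the install script's `INSTALL_K3S_EXEC` variable (the ingress controller you deploy afterwards, such as nginx-ingress, is your choice):

```sh
# Install a K3s server without the packaged Traefik ingress controller
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --no-deploy=traefik" sh -

# Then deploy the ingress controller of your choice into the cluster
```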
+ +**Does K3s support Windows?** + +At this time K3s does not natively support Windows, however we are open to the idea in the future. + +**How can I build from source?** + +Please reference the K3s [BUILDING.md](https://github.com/rancher/k3s/blob/master/BUILDING.md) with instructions. diff --git a/content/k3s/latest/en/installation/_index.md b/content/k3s/latest/en/installation/_index.md index 22d99bf2cd7..f631fe9c543 100644 --- a/content/k3s/latest/en/installation/_index.md +++ b/content/k3s/latest/en/installation/_index.md @@ -1,349 +1,19 @@ --- -title: "Installation Options" -weight: 2 +title: "Installation" +weight: 20 --- -This section contains information on flags and environment variables used for starting a k3s cluster. +This section contains instructions for installing K3s in various environments. Please ensure you have met the [Node Requirements]({{< baseurl >}}/k3s/latest/en/installation/node-requirements/) before you begin installing K3s. -Install Script --------------- +[Installation and Configuration Options]({{< baseurl >}}/k3s/latest/en/installation/install-options/) provides guidance on the options available to you when installing K3s. -The install script will attempt to download the latest release, to specify a specific -version for download we can use the `INSTALL_K3S_VERSION` environment variable, for example: -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - -``` -To install just the server without an agent we can add a `INSTALL_K3S_EXEC` -environment variable to the command: -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable-agent" sh - -``` +[High Availability with an External DB]({{< baseurl >}}/k3s/latest/en/installation/ha/) details how to setup an HA K3s cluster backed by an external datastore such as MySQL, PostgreSQL, or etcd. -The installer can also be run without performing downloads by setting `INSTALL_K3S_SKIP_DOWNLOAD=true`, for example: -```sh -curl -sfL https://github.com/rancher/k3s/releases/download/vX.Y.Z/k3s -o /usr/local/bin/k3s -chmod 0755 /usr/local/bin/k3s +[High Availability with Embedded DB (Experimental)]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded/) details how to setup an HA K3s cluster that leverages a built-in distributed database. -curl -sfL https://get.k3s.io -o install-k3s.sh -chmod 0755 install-k3s.sh +[Air-Gap Installation]({{< baseurl >}}/k3s/latest/en/installation/airgap/) details how to setup K3s in environments that do not have direct access to the Internet. -export INSTALL_K3S_SKIP_DOWNLOAD=true -./install-k3s.sh -``` +### Uninstalling -The full help text for the install script environment variables are as follows: - - `K3S_*` - - Environment variables which begin with `K3S_` will be preserved for the - systemd service to use. Setting `K3S_URL` without explicitly setting - a systemd exec command will default the command to "agent", and we - enforce that `K3S_TOKEN` or `K3S_CLUSTER_SECRET` is also set. - - - `INSTALL_K3S_SKIP_DOWNLOAD` - - If set to true will not download k3s hash or binary. - - - INSTALL_K3S_SYMLINK - - If set to 'skip' will not create symlinks, 'force' will overwrite, - default will symlink if command does not exist in path. - - - `INSTALL_K3S_VERSION` - - Version of k3s to download from github. Will attempt to download the - latest version if not specified. 
- - - `INSTALL_K3S_BIN_DIR` - - Directory to install k3s binary, links, and uninstall script to, or use - /usr/local/bin as the default - - - `INSTALL_K3S_SYSTEMD_DIR` - - Directory to install systemd service and environment files to, or use - /etc/systemd/system as the default - - - `INSTALL_K3S_EXEC` or script arguments - - Command with flags to use for launching k3s in the systemd service, if - the command is not specified will default to "agent" if `K3S_URL` is set - or "server" if not. The final systemd command resolves to a combination - of EXEC and script args ($@). - - The following commands result in the same behavior: - ```sh - curl ... | INSTALL_K3S_EXEC="--disable-agent" sh -s - - curl ... | INSTALL_K3S_EXEC="server --disable-agent" sh -s - - curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable-agent - curl ... | sh -s - server --disable-agent - curl ... | sh -s - --disable-agent - ``` - - - `INSTALL_K3S_NAME` - - Name of systemd service to create, will default from the k3s exec command - if not specified. If specified the name will be prefixed with 'k3s-'. - - - `INSTALL_K3S_TYPE` - - Type of systemd service to create, will default from the k3s exec command - if not specified. - -Server Options --------------- - -The following information on server options is also available through `k3s server --help` : - -* `--bind-address` _value_ - - k3s bind address (default: localhost) - -* `--https-listen-port` _value_ - - HTTPS listen port (default: 6443) - -* `--http-listen-port` _value_ - - HTTP listen port (for /healthz, HTTPS redirect, and port for TLS terminating LB) (default: 0) - -* `--data-dir` _value_, `-d` _value_ - - Folder to hold state default /var/lib/rancher/k3s or ${HOME}/.rancher/k3s if not root - -* `--disable-agent` - - Do not run a local agent and register a local kubelet - -* `--log` _value_, `-l` _value_ - - Log to file - -* `--cluster-cidr` _value_ - - Network CIDR to use for pod IPs (default: "10.42.0.0/16") - -* `--cluster-secret` _value_ - - Shared secret used to bootstrap a cluster [$`K3S_CLUSTER_SECRET`] - -* `--service-cidr` _value_ - - Network CIDR to use for services IPs (default: "10.43.0.0/16") - -* `--cluster-dns` _value_ - - Cluster IP for coredns service. 
Should be in your service-cidr range - -* `--cluster-domain` _value_ - - Cluster Domain (default: "cluster.local") - -* `--no-deploy` _value_ - - Do not deploy packaged components (valid items: coredns, servicelb, traefik) - -* `--write-kubeconfig` _value_, `-o` _value_ - - Write kubeconfig for admin client to this file [$`K3S_KUBECONFIG_OUTPUT`] - -* `--write-kubeconfig-mode` _value_ - - Write kubeconfig with this mode [$`K3S_KUBECONFIG_MODE`] - -* `--tls-san` _value_ - - Add additional hostname or IP as a Subject Alternative Name in the TLS cert - -* `--kube-apiserver-arg` _value_ - - Customized flag for kube-apiserver process - -* `--kube-scheduler-arg` _value_ - - Customized flag for kube-scheduler process - -* `--kube-controller-arg` _value_ - - Customized flag for kube-controller-manager process - -* `--rootless` - - (experimental) Run rootless - -* `--storage-backend` _value_ - - Specify storage type etcd3 or kvsql [$`K3S_STORAGE_BACKEND`] - -* `--storage-endpoint` _value_ - - Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$`K3S_STORAGE_ENDPOINT`] - -* `--storage-cafile` _value_ - - SSL Certificate Authority file used to secure storage backend communication [$`K3S_STORAGE_CAFILE`] - -* `--storage-certfile` _value_ - - SSL certification file used to secure storage backend communication [$`K3S_STORAGE_CERTFILE`] - -* `--storage-keyfile` _value_ - - SSL key file used to secure storage backend communication [$`K3S_STORAGE_KEYFILE`] - -* `--node-ip` _value_, `-i` _value_ - - (agent) IP address to advertise for node - -* `--node-name` _value_ - - (agent) Node name [$`K3S_NODE_NAME`] - -* `--docker` - - (agent) Use docker instead of containerd - -* `--no-flannel` - - (agent) Disable embedded flannel - -* `--flannel-iface` _value_ - - (agent) Override default flannel interface - -* `--container-runtime-endpoint` _value_ - - (agent) Disable embedded containerd and use alternative CRI implementation - -* `--pause-image` _value_ - - (agent) Customized pause image for containerd sandbox - -* `--resolv-conf` _value_ - - (agent) Kubelet resolv.conf file [$`K3S_RESOLV_CONF`] - -* `--kubelet-arg` _value_ - - (agent) Customized flag for kubelet process - -* `--kube-proxy-arg` _value_ - - (agent) Customized flag for kube-proxy process - -* `--node-label` _value_ - - (agent) Registering kubelet with set of labels - -* `--node-taint` _value_ - - (agent) Registering kubelet with set of taints - -Agent Options ------------------- - -The following information on agent options is also available through `k3s agent --help` : - -* `--token` _value_, `-t` _value_ - - Token to use for authentication [$`K3S_TOKEN`] - -* `--token-file` _value_ - - Token file to use for authentication [$`K3S_TOKEN_FILE`] - -* `--server` _value_, `-s` _value_ - - Server to connect to [$`K3S_URL`] - -* `--data-dir` _value_, `-d` _value_ - - Folder to hold state (default: "/var/lib/rancher/k3s") - -* `--cluster-secret` _value_ - - Shared secret used to bootstrap a cluster [$`K3S_CLUSTER_SECRET`] - -* `--rootless` - - (experimental) Run rootless - -* `--docker` - - (agent) Use docker instead of containerd - -* `--no-flannel` - - (agent) Disable embedded flannel - -* `--flannel-iface` _value_ - - (agent) Override default flannel interface - -* `--node-name` _value_ - - (agent) Node name [$`K3S_NODE_NAME`] - -* `--node-ip` _value_, `-i` _value - - (agent) IP address to advertise for node - -* `--container-runtime-endpoint` _value_ - - (agent) Disable embedded containerd and use alternative CRI implementation - -* 
`--pause-image` _value_ - - (agent) Customized pause image for containerd sandbox - -* `--resolv-conf` _value_ - - (agent) Kubelet resolv.conf file [$`K3S_RESOLV_CONF`] - -* `--kubelet-arg` _value_ - - (agent) Customized flag for kubelet process - -* `--kube-proxy-arg` _value_ - - (agent) Customized flag for kube-proxy process - -* `--node-label` _value_ - - (agent) Registering kubelet with set of labels - -* `--node-taint` _value_ - - (agent) Registering kubelet with set of taints - -Customizing components ----------------------- - -As of v0.3.0 any of the following processes can be customized with extra flags: - -* `--kube-apiserver-arg` _value_ - - (server) [kube-apiserver options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) - -* `--kube-controller-arg` _value_ - - (server) [kube-controller-manager options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) - -* `--kube-scheduler-arg` _value_ - - (server) [kube-scheduler options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) - -* `--kubelet-arg` _value_ - - (agent) [kubelet options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) - -* `--kube-proxy-arg` _value_ - - (agent) [kube-proxy options](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/) - -Adding extra arguments can be done by passing the following flags to server or agent. -For example to add the following arguments `-v=9` and `log-file=/tmp/kubeapi.log` to the kube-apiserver, you should add the following options to k3s server: - -``` ---kube-apiserver-arg v=9 --kube-apiserver-arg log-file=/tmp/kubeapi.log -``` +If you installed K3s with the help of the `install.sh` script, an uninstall script is generated during installation, which will be created on your node at `/usr/local/bin/k3s-uninstall.sh` (or as `k3s-agent-uninstall.sh`). diff --git a/content/k3s/latest/en/installation/airgap/_index.md b/content/k3s/latest/en/installation/airgap/_index.md new file mode 100644 index 00000000000..f919b3768ce --- /dev/null +++ b/content/k3s/latest/en/installation/airgap/_index.md @@ -0,0 +1,77 @@ +--- +title: "Air-Gap Install" +weight: 60 +--- + +In this guide, we are assuming you have created your nodes in your air-gap environment and have a secure Docker private registry on your bastion server. + +Installation Outline +-------------------- +1. Prepare Images Directory +2. Create Registry YAML +3. Install K3s + +### Prepare Images Directory +Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. + +Place the tar file in the `images` directory before starting K3s on each node, for example: + +```sh +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ +``` + +### Create Registry YAML +Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. 
+The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +Note, at this time only secure registries are supported with K3s (SSL with custom CA) + +### Install K3s + +Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. +Also obtain the K3s install script at https://get.k3s.io + +Place the binary in `/usr/local/bin` on each node. +Place the install script anywhere on each node, name it `install.sh`. + +Install K3s on each node. The example below shows how to do this for a server or an agent (worker): + +``` +# K3s Server +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh + +# K3s Agent +INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh +``` + +Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. +The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` + + +>**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. + +# Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. +2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. +3. Restart the K3s service (if not restarted automatically by installer). diff --git a/content/k3s/latest/en/installation/datastore/_index.md b/content/k3s/latest/en/installation/datastore/_index.md new file mode 100644 index 00000000000..6e5ac992ab3 --- /dev/null +++ b/content/k3s/latest/en/installation/datastore/_index.md @@ -0,0 +1,97 @@ +--- +title: "Cluster Datastore Options" +weight: 50 +--- + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available datastore options allow you to select a datastore that best fits your use case. 
For example: + +* If your team doesn't have expertise in operating etcd, you can choose an enterprise-grade SQL database like MySQL or PostgreSQL +* If you need to run a simple, short-lived cluster in your CI/CD environment, you can use the embedded SQLite database +* If you wish to deploy Kubernetes on the edge and require a highly available solution but can't afford the operational overhead of managing a database at the edge, you can use K3s's embedded HA datastore built on top of DQLite (currently experimental) + +K3s supports the following datastore options: + +* Embedded [SQLite](https://www.sqlite.org/index.html) +* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) +* [MySQL](https://www.mysql.com/) (certified against version 5.7) +* [etcd](https://etcd.io/) (certified against version 3.3.15) +* Embedded [DQLite](https://dqlite.io/) for High Availability (experimental) + +### External Datastore Configuration Parameters +If you wish to use an external datastore such as PostgreSQL, MySQL, or etcd you must set the `datastore-endpoint` parameter so that K3s knows how to connect to it. You may also specify parameters to configure the authentication and encryption of the connection. The below table summarizes these parameters, which can be passed as either CLI flags or environment variables. + + CLI Flag | Environment Variable | Description + ------------|-------------|------------------ + `--datastore-endpoint` | `K3S_DATASTORE_ENDPOINT` | Specify a PostgresSQL, MySQL, or etcd connection string. This is a string used to describe the connection to the datastore. The structure of this string is specific to each backend and is detailed below. + `--datastore-cafile` | `K3S_DATASTORE_CAFILE` | TLS Certificate Authority (CA) file used to help secure communication with the datastore. If your datastore serves requests over TLS using a certificate signed by a custom certificate authority, you can specify that CA using this parameter so that the K3s client can properly verify the certificate. | +| `--datastore-certfile` | `K3S_DATASTORE_CERTFILE` | TLS certificate file used for client certificate based authentication to your datastore. To use this feature, your datastore must be configured to support client certificate based authentication. If you specify this parameter, you must also specify the `datastore-keyfile` parameter. | +| `--datastore-keyfile` | `K3S_DATASTORE_KEYFILE` | TLS key file used for client certificate based authentication to your datastore. See the previous `datastore-certfile` parameter for more details. | + +As a best practice we recommend setting these parameters as environment variables rather than command line arguments so that your database credentials or other sensitive information aren't exposed as part of the process info. + +### Datastore Endpoint Format and Functionality +As mentioned, the format of the value passed to the `datastore-endpoint` parameter is dependent upon the datastore backend. The following details this format and functionality for each supported external datastore. + +{{% tabs %}} +{{% tab "PostgreSQL" %}} + +In its most common form, the datastore-endpoint parameter for PostgreSQL has the following format: + +`postgres://username:password@hostname:port/database-name` + +More advanced configuration parameters are available. For more information on these, please see https://godoc.org/github.com/lib/pq. + +If you specify a database name and it does not exist, the server will attempt to create it. 
+ +If you only supply `postgres://` as the endpoint, K3s will attempt to do the following: + +* Connect to localhost using `postgres` as the username and password +* Create a database named `kubernetes` + + +{{% /tab %}} +{{% tab "MySQL" %}} + +In its most common form, the `datastore-endpoint` parameter for MySQL has the following format: + +`mysql://username:password@tcp(hostname:3306)/database-name` + +More advanced configuration parameters are available. For more information on these, please see https://github.com/go-sql-driver/mysql#dsn-data-source-name + +Note that due to a [known issue](https://github.com/rancher/k3s/issues/1093) in K3s, you cannot set the `tls` parameter. TLS communication is supported, but you cannot, for example, set this parameter to "skip-verify" to cause K3s to skip certificate verification. + +If you specify a database name and it does not exist, the server will attempt to create it. + +If you only supply `mysql://` as the endpoint, K3s will attempt to do the following: + +* Connect to the MySQL socket at `/var/run/mysqld/mysqld.sock` using the `root` user and no password +* Create a database with the name `kubernetes` + + +{{% /tab %}} +{{% tab "etcd" %}} + +In its most common form, the `datastore-endpoint` parameter for etcd has the following format: + +`https://etcd-host-1:2379,https://etcd-host-2:2379,https://etcd-host-3:2379` + +The above assumes a typical three-node etcd cluster. The parameter can accept one or more comma-separated etcd URLs. + +{{% /tab %}} +{{% /tabs %}} + +
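To put the etcd format into a full launch command, a sketch along the following lines could be used; the hostnames and CA path are hypothetical and would need to match your own etcd cluster:

```
K3S_DATASTORE_ENDPOINT='https://etcd-1.example.com:2379,https://etcd-2.example.com:2379,https://etcd-3.example.com:2379' \
K3S_DATASTORE_CAFILE='/etc/ssl/certs/etcd-ca.crt' \
k3s server
```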
Based on the above, the following example command could be used to launch a server instance that connects to a PostgreSQL database named k3s-db: +``` +K3S_DATASTORE_ENDPOINT='postgres://username:password@hostname:5432/k3s-db' k3s server +``` + +And the following example could be used to connect to a MySQL database using client certificate authentication: +``` +K3S_DATASTORE_ENDPOINT='mysql://username:password@tcp(hostname:3306)/k3s-db' \ +K3S_DATASTORE_CERTFILE='/path/to/client.crt' \ +K3S_DATASTORE_KEYFILE='/path/to/client.key' \ +k3s server +``` + +### Embedded DQLite for HA (Experimental) +K3s's use of DQLite is similar to its use of SQLite. It is simple to set up and manage. As such, there is no external configuration or additional steps to take in order to use this option. Please see [High Availability with Embedded DB (Experimental)]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded/) for instructions on how to run with this option. diff --git a/content/k3s/latest/en/installation/ha-embedded/_index.md b/content/k3s/latest/en/installation/ha-embedded/_index.md new file mode 100644 index 00000000000..58526b65280 --- /dev/null +++ b/content/k3s/latest/en/installation/ha-embedded/_index.md @@ -0,0 +1,22 @@ +--- +title: "High Availability with Embedded DB (Experimental)" +weight: 40 +--- + +As of v1.0.0, K3s is previewing support for running a highly available control plane without the need for an external database. This means there is no need to manage an external etcd or SQL datastore in order to run a reliable production-grade setup. While this feature is currently experimental, we expect it to be the primary architecture for running HA K3s clusters in the future. + +This architecture is achieved by embedding a DQLite database within the K3s server process. DQLite is short for "distributed SQLite." According to https://dqlite.io, it is "*a fast, embedded, persistent SQL database with Raft consensus that is perfect for fault-tolerant IoT and Edge devices.*" This makes it a natural fit for K3s. + +To run K3s in this mode, you must have an odd number of server nodes. We recommend starting with three nodes. + +To get started, first launch a server node with the `cluster-init` flag to enable clustering and a token that will be used as a shared secret to join additional servers to the cluster. +``` +K3S_TOKEN=SECRET k3s server --cluster-init +``` + +After launching the first server, join the second and third servers to the cluster using the shared secret: +``` +K3S_TOKEN=SECRET k3s server --server https://<ip or hostname of the first server>:6443 +``` + +Now you have a highly available control plane. Joining additional worker nodes to the cluster follows the same procedure as a single server cluster. diff --git a/content/k3s/latest/en/installation/ha/_index.md b/content/k3s/latest/en/installation/ha/_index.md new file mode 100644 index 00000000000..81adf97d1a7 --- /dev/null +++ b/content/k3s/latest/en/installation/ha/_index.md @@ -0,0 +1,57 @@ +--- +title: "High Availability with an External DB" +weight: 30 +--- + +>**Note:** Official support for High-Availability (HA) was introduced in our v1.0.0 release. + +Single server clusters can meet a variety of use cases, but for environments where uptime of the Kubernetes control plane is critical, you can run K3s in an HA configuration.
An HA K3s cluster is comprised of: + +* Two or more **server nodes** that will serve the Kubernetes API and run other control plane services +* An **external datastore** (as opposed to the embedded SQLite datastore used in single server setups) +* A **fixed registration address** placed in front of the server nodes to allow worker nodes to register with the cluster + +The following diagram illustrates the above configuration: +![k3s HA]({{< baseurl >}}/img/k3s/k3s-production-setup.svg) + +In this architecture a server node is defined as a machine (bare-metal or virtual) running the `k3s server` command. A worker node is defined as a machine running the `k3s agent` command. + +Workers register through the fixed registration address, but after registration they establish a connection directly to one of the sever nodes. This is a websocket connection initiated by the `k3s agent` process and it is maintained by a client-side load balancer running as part of the agent process. + +Installation Outline +-------------------- +Setting up an HA cluster requires the following steps: + +1. Create an external datastore +2. Launch server nodes +3. Configure fixed registration address +4. Join worker nodes + +### Create an External Datastore +You will first need to create an external datastore for the cluster. See the [Cluster Datastore Options]({{< baseurl >}}/k3s/latest/en/installation/datastore/) documentation for more details. + +### Launch Server Nodes +K3s requires two or more server nodes for this HA configuration. See the [Node Requirements]({{< baseurl >}}/k3s/latest/en/installation/node-requirements/) guide for minimum machine requirements. + +When running the `k3s server` command on these nodes, you must set the `datastore-endpoint` parameter so that K3s knows how to connect to the external datastore. Please see the [datastore configuration guide]({{< baseurl >}}/k3s/latest/en/installation/datastore/#external-datastore-configuration-parameters) for information on configuring this parameter. + +> **Note:** The same installation options available to single-server installs are also available for HA installs. For more details, see the [Installation and Configuration Options]({{< baseurl >}}/k3s/latest/en/installation/install-options/) documentation. + +By default, server nodes will be schedulable and thus your workloads can get launched on them. If you wish to have a dedicated control plane where no user workloads will run, you can use taints. The `node-taint` parameter will allow you to configure nodes with taints, for example `--node-taint k3s-controlplane=true:NoExecute`. + +Once you've launched the `k3s server` process on all server nodes, you can ensure that the cluster has come up properly by checking that the nodes are in the Ready state with `k3s kubectl get nodes`. + +### Configure the Fixed Registration Address +Worker nodes need a URL to register against. This can be the IP or hostname of any of the server nodes, but in many cases those may change over time. For example, if you are running your cluster in a cloud that supports scaling groups, you may scale the server node group up and down over time, causing nodes to be created and destroyed and thus having different IPs from the initial set of server nodes. Therefore, you should have a stable endpoint in front of the server nodes that will not change over time. 
This endpoint can be setup using any number approaches, such as: + +* A layer-4 (TCP) load balancer +* Round-robin DNS +* A virtual or elastic IP addresses + +This endpoint can also be used for accessing the Kubernetes API. So you can, for example, modify your kubeconfig file to point to it instead of a specific node. + +### Join Worker Nodes +Joining worker nodes in an HA cluster is the same as joining worker nodes in a single server cluster. You just need to specify the URL the agent should register to and the token it should use. +``` +K3S_TOKEN=SECRET k3s agent --server https://fixed-registration-address:6443 +``` diff --git a/content/k3s/latest/en/installation/install-options/_index.md b/content/k3s/latest/en/installation/install-options/_index.md new file mode 100644 index 00000000000..b9c3aa859e2 --- /dev/null +++ b/content/k3s/latest/en/installation/install-options/_index.md @@ -0,0 +1,183 @@ +--- +title: "Installation and Configuration Options" +weight: 20 +--- + +### Installation script options + +As mentioned in the [Quick-Start Guide]({{< baseurl >}}/k3s/latest/en/quick-start/), you can use the installation script available at https://get.k3s.io to install K3s as a service on systemd and openrc based systems. + +The simplest form of this command is as follows: +```sh +curl -sfL https://get.k3s.io | sh - +``` + +When using this method to install K3s, the following environment variables can be used to configure the installation: + +- `INSTALL_K3S_SKIP_DOWNLOAD` + + If set to true will not download K3s hash or binary. + +- `INSTALL_K3S_SYMLINK` + + If set to 'skip' will not create symlinks, 'force' will overwrite, default will symlink if command does not exist in path. + +- `INSTALL_K3S_SKIP_START` + + If set to true will not start K3s service. + +- `INSTALL_K3S_VERSION` + + Version of K3s to download from github. Will attempt to download the latest version if not specified. + +- `INSTALL_K3S_BIN_DIR` + + Directory to install K3s binary, links, and uninstall script to, or use `/usr/local/bin` as the default. + +- `INSTALL_K3S_BIN_DIR_READ_ONLY` + + If set to true will not write files to `INSTALL_K3S_BIN_DIR`, forces setting INSTALL_K3S_SKIP_DOWNLOAD=true. + +- `INSTALL_K3S_SYSTEMD_DIR` + + Directory to install systemd service and environment files to, or use `/etc/systemd/system` as the default. + +- `INSTALL_K3S_EXEC` + + Command with flags to use for launching K3s in the service. If the command is not specified, it will default to "agent" if `K3S_URL` is set or "server" if it is not set. The final systemd command resolves to a combination of this environment variable and script args. To illustrate this, the following commands result in the same behavior: + ```sh + curl ... | INSTALL_K3S_EXEC="--no-flannel" sh -s - + curl ... | INSTALL_K3S_EXEC="server --no-flannel" sh -s - + curl ... | INSTALL_K3S_EXEC="server" sh -s - --no-flannel + curl ... | sh -s - server --no-flannel + curl ... | sh -s - --no-flannel + ``` + + - `INSTALL_K3S_NAME` + + Name of systemd service to create, will default from the K3s exec command if not specified. If specified the name will be prefixed with 'k3s-'. + + - `INSTALL_K3S_TYPE` + + Type of systemd service to create, will default from the K3s exec command if not specified. + + +Environment variables which begin with `K3S_` will be preserved for the systemd and openrc services to use. Setting `K3S_URL` without explicitly setting an exec command will default the command to "agent". When running the agent `K3S_TOKEN` must also be set. 
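For example, here is a sketch that combines an installer variable, a preserved `K3S_` variable, and trailing server flags; the version is a placeholder, and the kubeconfig mode and disabled component are illustrative choices rather than defaults:

```sh
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z K3S_KUBECONFIG_MODE=644 sh -s - server --no-deploy traefik
```

Here `INSTALL_K3S_VERSION` is consumed by the installer itself, while `K3S_KUBECONFIG_MODE` is preserved for the systemd or openrc service and picked up by `k3s server` at runtime.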
+ + +### Beyond the Installation Script +As stated, the installation script is primarily concerned with configuring K3s to run as a service. If you choose to not use the script, you can run K3s simply by downloading the binary from our [release page](https://github.com/rancher/k3s/releases/latest), placing it on your path, and executing it. The K3s binary supports the following commands: + +Command | Description +--------|------------------ +`k3s server` | Run the K3s management server, which will also launch Kubernetes control plane components such as the API server, controller-manager, and scheduler. +`k3s agent` | Run the K3s node agent. This will cause K3s to run as a worker node, launching the Kubernetes node services `kubelet` and `kube-proxy`. +`k3s kubectl` | Run an embedded [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) CLI. If the `KUBECONFIG` environment variable is not set, this will automatically attempt to use the config file that is created at `/etc/rancher/k3s/k3s.yaml` when launching a K3s server node. +`k3s crictl` | Run an embedded [crictl](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md). This is a CLI for interacting with Kubernetes's container runtime interface (CRI). Useful for debugging. +`k3s ctr` | Run an embedded [ctr](https://github.com/projectatomic/containerd/blob/master/docs/cli.md). This is a CLI for containerd, the container daemon used by K3s. Useful for debugging. +`k3s help` | Shows a list of commands or help for one command + +The `k3s server` and `k3s agent` commands have additional configuration options that can be viewed with `k3s server --help` or `k3s agent --help`. For convenience, that help text is presented here: + +### `k3s server` +``` +NAME: + k3s server - Run management server + +USAGE: + k3s server [OPTIONS] + +OPTIONS: + -v value (logging) Number for the log level verbosity (default: 0) + --vmodule value (logging) Comma-separated list of pattern=N settings for file-filtered logging + --log value, -l value (logging) Log to file + --alsologtostderr (logging) Log to standard error as well as file (if set) + --bind-address value (listener) k3s bind address (default: 0.0.0.0) + --https-listen-port value (listener) HTTPS listen port (default: 6443) + --advertise-address value (listener) IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip) + --advertise-port value (listener) Port that apiserver uses to advertise to members of the cluster (default: listen-port) (default: 0) + --tls-san value (listener) Add additional hostname or IP as a Subject Alternative Name in the TLS cert + --data-dir value, -d value (data) Folder to hold state default /var/lib/rancher/k3s or ${HOME}/.rancher/k3s if not root + --cluster-cidr value (networking) Network CIDR to use for pod IPs (default: "10.42.0.0/16") + --service-cidr value (networking) Network CIDR to use for services IPs (default: "10.43.0.0/16") + --cluster-dns value (networking) Cluster IP for coredns service. 
Should be in your service-cidr range (default: 10.43.0.10) + --cluster-domain value (networking) Cluster Domain (default: "cluster.local") + --flannel-backend value (networking) One of 'none', 'vxlan', 'ipsec', or 'flannel' (default: "vxlan") + --token value, -t value (cluster) Shared secret used to join a server or agent to a cluster [$K3S_TOKEN] + --token-file value (cluster) File containing the cluster-secret/token [$K3S_TOKEN_FILE] + --write-kubeconfig value, -o value (client) Write kubeconfig for admin client to this file [$K3S_KUBECONFIG_OUTPUT] + --write-kubeconfig-mode value (client) Write kubeconfig with this mode [$K3S_KUBECONFIG_MODE] + --kube-apiserver-arg value (flags) Customized flag for kube-apiserver process + --kube-scheduler-arg value (flags) Customized flag for kube-scheduler process + --kube-controller-manager-arg value (flags) Customized flag for kube-controller-manager process + --kube-cloud-controller-manager-arg value (flags) Customized flag for kube-cloud-controller-manager process + --datastore-endpoint value (db) Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$K3S_DATASTORE_ENDPOINT] + --datastore-cafile value (db) TLS Certificate Authority file used to secure datastore backend communication [$K3S_DATASTORE_CAFILE] + --datastore-certfile value (db) TLS certification file used to secure datastore backend communication [$K3S_DATASTORE_CERTFILE] + --datastore-keyfile value (db) TLS key file used to secure datastore backend communication [$K3S_DATASTORE_KEYFILE] + --default-local-storage-path value (storage) Default local storage path for local provisioner storage class + --no-deploy value (components) Do not deploy packaged components (valid items: coredns, servicelb, traefik, local-storage, metrics-server) + --disable-scheduler (components) Disable Kubernetes default scheduler + --disable-cloud-controller (components) Disable k3s default cloud controller manager + --disable-network-policy (components) Disable k3s default network policy controller + --node-name value (agent/node) Node name [$K3S_NODE_NAME] + --with-node-id (agent/node) Append id to node name + --node-label value (agent/node) Registering kubelet with set of labels + --node-taint value (agent/node) Registering kubelet with set of taints + --docker (agent/runtime) Use docker instead of containerd + --container-runtime-endpoint value (agent/runtime) Disable embedded containerd and use alternative CRI implementation + --pause-image value (agent/runtime) Customized pause image for containerd sandbox + --private-registry value (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml") + --node-ip value, -i value (agent/networking) IP address to advertise for node + --node-external-ip value (agent/networking) External IP address to advertise for node + --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] + --flannel-iface value (agent/networking) Override default flannel interface + --flannel-conf value (agent/networking) Override default flannel config file + --kubelet-arg value (agent/flags) Customized flag for kubelet process + --kube-proxy-arg value (agent/flags) Customized flag for kube-proxy process + --rootless (experimental) Run rootless + --agent-token value (experimental/cluster) Shared secret used to join agents to the cluster, but not servers [$K3S_AGENT_TOKEN] + --agent-token-file value (experimental/cluster) File containing the agent secret [$K3S_AGENT_TOKEN_FILE] + --server value, -s value 
(experimental/cluster) Server to connect to, used to join a cluster [$K3S_URL] + --cluster-init (experimental/cluster) Initialize new cluster master [$K3S_CLUSTER_INIT] + --cluster-reset (experimental/cluster) Forget all peers and become a single cluster new cluster master [$K3S_CLUSTER_RESET] + --no-flannel (deprecated) use --flannel-backend=none + --cluster-secret value (deprecated) use --token [$K3S_CLUSTER_SECRET] +``` + +### `k3s agent` +``` +NAME: + k3s agent - Run node agent + +USAGE: + k3s agent [OPTIONS] + +OPTIONS: + -v value (logging) Number for the log level verbosity (default: 0) + --vmodule value (logging) Comma-separated list of pattern=N settings for file-filtered logging + --log value, -l value (logging) Log to file + --alsologtostderr (logging) Log to standard error as well as file (if set) + --token value, -t value (cluster) Token to use for authentication [$K3S_TOKEN] + --token-file value (cluster) Token file to use for authentication [$K3S_TOKEN_FILE] + --server value, -s value (cluster) Server to connect to [$K3S_URL] + --data-dir value, -d value (agent/data) Folder to hold state (default: "/var/lib/rancher/k3s") + --node-name value (agent/node) Node name [$K3S_NODE_NAME] + --with-node-id (agent/node) Append id to node name + --node-label value (agent/node) Registering kubelet with set of labels + --node-taint value (agent/node) Registering kubelet with set of taints + --docker (agent/runtime) Use docker instead of containerd + --container-runtime-endpoint value (agent/runtime) Disable embedded containerd and use alternative CRI implementation + --pause-image value (agent/runtime) Customized pause image for containerd sandbox + --private-registry value (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml") + --node-ip value, -i value (agent/networking) IP address to advertise for node + --node-external-ip value (agent/networking) External IP address to advertise for node + --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] + --flannel-iface value (agent/networking) Override default flannel interface + --flannel-conf value (agent/networking) Override default flannel config file + --kubelet-arg value (agent/flags) Customized flag for kubelet process + --kube-proxy-arg value (agent/flags) Customized flag for kube-proxy process + --rootless (experimental) Run rootless + --no-flannel (deprecated) use --flannel-backend=none + --cluster-secret value (deprecated) use --token [$K3S_CLUSTER_SECRET] +``` diff --git a/content/k3s/latest/en/installation/node-requirements/_index.md b/content/k3s/latest/en/installation/node-requirements/_index.md new file mode 100644 index 00000000000..25e2a8b3119 --- /dev/null +++ b/content/k3s/latest/en/installation/node-requirements/_index.md @@ -0,0 +1,38 @@ +--- +title: Node Requirements +weight: 1 +--- + +K3s is very lightweight, but has some minimum requirements as outlined below. + +Whether you're configuring a K3s cluster to run in a single-node or high-availability (HA) setup, each node running K3s should meet the following minimum requirements. You may need more resources to fit your needs. + +## Prerequisites +* Two nodes cannot have the same hostname. If all your nodes have the same hostname, pass `--node-name` or set `$K3S_NODE_NAME` with a unique name for each node you add to the cluster. + +## Operating Systems + +K3s should run on just about any flavor of Linux. However, K3s is tested on the following operating systems and their subsequent non-major releases. 
+ +* Ubuntu 16.04 (amd64) +* Ubuntu 18.04 (amd64) +* Raspbian Buster (armhf) + +## Hardware + +Hardware requirements scale based on the size of your deployments. Minimum recommendations are outlined here. + +* RAM: 512MB Minimum +* CPU: 1 Minimum + +#### Disks + +K3s performance depends on the performance of the database. To ensure optimal speed, we recommend using an SSD when possible. Disk performance will vary on ARM devices utilizing an SD card or eMMC. + +## Networking + +The K3s server needs port 6443 to be accessible by the nodes. The nodes need to be able to reach other nodes over UDP port 8472 (Flannel VXLAN). If you do not use flannel and provide your own custom CNI, then port 8472 is not needed by K3s. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. + +IMPORTANT: The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disabled access to port 8472. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. diff --git a/content/k3s/latest/en/known-issues/_index.md b/content/k3s/latest/en/known-issues/_index.md new file mode 100644 index 00000000000..c79f84bc653 --- /dev/null +++ b/content/k3s/latest/en/known-issues/_index.md @@ -0,0 +1,13 @@ +--- +title: Known Issues +weight: 70 +--- +The Known Issues are updated periodically and designed to inform you about any issues that may not be immediately addressed in the next upcoming release. + +**Snap Docker** + +If you plan to use K3s with docker, Docker installed via a snap package is not recommended as it has been known to cause issues running K3s. + +**Iptables** + +If you are running iptables in nftables mode instead of legacy you might encounter issues. We recommend utilizing newer iptables (such as 1.6.1+) to avoid issues. diff --git a/content/k3s/latest/en/networking/_index.md b/content/k3s/latest/en/networking/_index.md new file mode 100644 index 00000000000..4c6ae853e43 --- /dev/null +++ b/content/k3s/latest/en/networking/_index.md @@ -0,0 +1,43 @@ +--- +title: "Networking" +weight: 35 +--- + +Open Ports +---------- +Please reference the [Node Requirements]({{< baseurl >}}/k3s/latest/en/installation/node-requirements/#networking) page for port information. + +Flannel +------- + +Flannel is included by default, if you don't want flannel then run each agent with `--no-flannel` option. + +In this setup you will still be required to install your own CNI driver. More info [here](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network) + +CoreDNS +------- + +CoreDNS is deployed on start of the agent, to disable run each server with the `--no-deploy coredns` option. + +If you don't install CoreDNS you will need to install a cluster DNS provider yourself. + +Traefik Ingress Controller +-------------------------- + +Traefik is deployed by default when starting the server. For more information see [Auto Deploying Manifests]({{< baseurl >}}/k3s/latest/en/configuration/#auto-deploying-manifests). The default config file is found in `/var/lib/rancher/k3s/server/manifests/traefik.yaml` and any changes made to this file will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`. + +The Traefik ingress controller will use ports 80, 443, and 8080 on the host (i.e. 
these will not be usable for HostPort or NodePort). + +You can tweak traefik to meet your needs by setting options in the traefik.yaml file. +Reference the official [Traefik for Helm Configuration Parameters](https://github.com/helm/charts/tree/master/stable/traefik#configuration) readme for more information. + +To disable it, start each server with the `--no-deploy traefik` option. + +Service Load Balancer +--------------------- + +K3s includes a basic service load balancer that uses available host ports. If you try to create +a load balancer that listens on port 80, for example, it will try to find a free host in the cluster +for port 80. If no port is available the load balancer will stay in Pending. + +To disable the embedded load balancer run the server with the `--no-deploy servicelb` option. This is necessary if you wish to run a different load balancer, such as MetalLB. diff --git a/content/k3s/latest/en/quick-start/_index.md b/content/k3s/latest/en/quick-start/_index.md index e24d414c2c1..e4e8156bf34 100644 --- a/content/k3s/latest/en/quick-start/_index.md +++ b/content/k3s/latest/en/quick-start/_index.md @@ -1,44 +1,30 @@ --- -title: "Quick-Start" -weight: 1 +title: "Quick-Start Guide" +weight: 10 --- -There are many ways to run k3s, we cover a couple easy ways to get started in this section. -The [installation options](../installation) section will cover in greater detail how k3s can be setup. +>**Note:** This guide will help you quickly launch a cluster with default options. The [installation section](../installation) covers in greater detail how K3s can be set up. + +> New to Kubernetes? The official Kubernetes docs already have some great tutorials outlining the basics [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/). Install Script -------------- -The k3s `install.sh` script provides a convenient way for installing to systemd or openrc, -to install k3s as a service just run: +K3s provides an installation script that is a convenient way to install it as a service on systemd or openrc based systems. This script is available at https://get.k3s.io. To install K3s using this method, just run: ```bash curl -sfL https://get.k3s.io | sh - ``` -A kubeconfig file is written to `/etc/rancher/k3s/k3s.yaml` and the service is automatically started or restarted. -The install script will install k3s and additional utilities, such as `kubectl`, `crictl`, `k3s-killall.sh`, and `k3s-uninstall.sh`, for example: +After running this installation: + +* The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed +* Additional utilities will be installed, including `kubectl`, `crictl`, `ctr`, `k3s-killall.sh`, and `k3s-uninstall.sh` +* A kubeconfig file will be written to `/etc/rancher/k3s/k3s.yaml` and the kubectl installed by K3s will automatically use it + +To install on worker nodes and add them to the cluster, run the installation script with the `K3S_URL` and `K3S_TOKEN` environment variables. Here is an example showing how to join a worker node: ```bash -sudo kubectl get nodes +curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh - ``` +Setting the `K3S_URL` parameter causes K3s to run in worker mode. The K3s agent will register with the K3s server listening at the supplied URL. The value to use for `K3S_TOKEN` is stored at `/var/lib/rancher/k3s/server/node-token` on your server node. -`K3S_TOKEN` is created at `/var/lib/rancher/k3s/server/node-token` on your server. 
-To install on worker nodes we should pass `K3S_URL` along with -`K3S_TOKEN` or `K3S_CLUSTER_SECRET` environment variables, for example: -```bash -curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=XXX sh - -``` - -Manual Download ---------------- -1. Download `k3s` from latest [release](https://github.com/rancher/k3s/releases/latest), x86_64, armhf, and arm64 are supported. -2. Run server. - -```bash -sudo k3s server & -# Kubeconfig is written to /etc/rancher/k3s/k3s.yaml -sudo k3s kubectl get nodes - -# On a different node run the below. NODE_TOKEN comes from -# /var/lib/rancher/k3s/server/node-token on your server -sudo k3s agent --server https://myserver:6443 --token ${NODE_TOKEN} -``` +Note: Each machine must have a unique hostname. If your machines do not have unique hostnames, pass the `K3S_NODE_NAME` environment variable and provide a value with a valid and unique hostname for each node. diff --git a/content/k3s/latest/en/running/_index.md b/content/k3s/latest/en/running/_index.md deleted file mode 100644 index 0782e165a14..00000000000 --- a/content/k3s/latest/en/running/_index.md +++ /dev/null @@ -1,253 +0,0 @@ ---- -title: "Running K3S" -weight: 3 ---- - -This section contains information for running k3s in various environments. - -Starting the Server ------------------- - -The installation script will auto-detect if your OS is using systemd or openrc and start the service. -When running with openrc logs will be created at `/var/log/k3s.log`, or with systemd in `/var/log/syslog` and viewed using `journalctl -u k3s`. An example of installing and auto-starting with the install script: - -```bash -curl -sfL https://get.k3s.io | sh - -``` - -When running the server manually you should get an output similar to: - -``` -$ k3s server -INFO[2019-01-22T15:16:19.908493986-07:00] Starting k3s dev -INFO[2019-01-22T15:16:19.908934479-07:00] Running kube-apiserver --allow-privileged=true --authorization-mode Node,RBAC --service-account-signing-key-file /var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range 10.43.0.0/16 --advertise-port 6445 --advertise-address 127.0.0.1 --insecure-port 0 --secure-port 6444 --bind-address 127.0.0.1 --tls-cert-file /var/lib/rancher/k3s/server/tls/localhost.crt --tls-private-key-file /var/lib/rancher/k3s/server/tls/localhost.key --service-account-key-file /var/lib/rancher/k3s/server/tls/service.key --service-account-issuer k3s --api-audiences unknown --basic-auth-file /var/lib/rancher/k3s/server/cred/passwd --kubelet-client-certificate /var/lib/rancher/k3s/server/tls/token-node.crt --kubelet-client-key /var/lib/rancher/k3s/server/tls/token-node.key -Flag --insecure-port has been deprecated, This flag will be removed in a future version. -INFO[2019-01-22T15:16:20.196766005-07:00] Running kube-scheduler --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --port 0 --secure-port 0 --leader-elect=false -INFO[2019-01-22T15:16:20.196880841-07:00] Running kube-controller-manager --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --service-account-private-key-file /var/lib/rancher/k3s/server/tls/service.key --allocate-node-cidrs --cluster-cidr 10.42.0.0/16 --root-ca-file /var/lib/rancher/k3s/server/tls/token-ca.crt --port 0 --secure-port 0 --leader-elect=false -Flag --port has been deprecated, see --secure-port instead. 
-INFO[2019-01-22T15:16:20.273441984-07:00] Listening on :6443 -INFO[2019-01-22T15:16:20.278383446-07:00] Writing manifest: /var/lib/rancher/k3s/server/manifests/coredns.yaml -INFO[2019-01-22T15:16:20.474454524-07:00] Node token is available at /var/lib/rancher/k3s/server/node-token -INFO[2019-01-22T15:16:20.474471391-07:00] To join node to cluster: k3s agent -s https://10.20.0.3:6443 -t ${NODE_TOKEN} -INFO[2019-01-22T15:16:20.541027133-07:00] Wrote kubeconfig /etc/rancher/k3s/k3s.yaml -INFO[2019-01-22T15:16:20.541049100-07:00] Run: k3s kubectl -``` - -The output will likely be much longer as the agent will create a lot of logs. By default the server -will register itself as a node (run the agent). - -It is common and almost required these days that the control plane be part of the cluster. -To disable the agent when running the server use the `--disable-agent` flag, the agent can then be run as a separate process. - -Joining Nodes -------------- - -When the server starts it creates a file `/var/lib/rancher/k3s/server/node-token`. -Using the contents of that file as `K3S_TOKEN` and setting `K3S_URL` allows the node -to join as an agent using the install script: - - curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=XXX sh - - -When using the install script openrc logs will be created at `/var/log/k3s-agent.log`, or with systemd in `/var/log/syslog` and viewed using `journalctl -u k3s-agent`. - -Or running k3s manually with the token as `NODE_TOKEN`: - - k3s agent --server https://myserver:6443 --token ${NODE_TOKEN} - -SystemD -------- - -If you are using systemd here is a sample unit `k3s.service`: - -```ini -[Unit] -Description=Lightweight Kubernetes -Documentation=https://k3s.io -After=network-online.target - -[Service] -Type=notify -EnvironmentFile=/etc/systemd/system/k3s.service.env -ExecStart=/usr/local/bin/k3s server -KillMode=process -Delegate=yes -LimitNOFILE=infinity -LimitNPROC=infinity -LimitCORE=infinity -TasksMax=infinity -TimeoutStartSec=0 -Restart=always -RestartSec=5s - -[Install] -WantedBy=multi-user.target -``` - -OpenRC ------- - -And an example openrc `/etc/init.d/k3s`: - -```bash -#!/sbin/openrc-run - -depend() { - after net-online - need net -} - -start_pre() { - rm -f /tmp/k3s.* -} - -supervisor=supervise-daemon -name="k3s" -command="/usr/local/bin/k3s" -command_args="server >>/var/log/k3s.log 2>&1" - -pidfile="/var/run/k3s.pid" -respawn_delay=5 - -set -o allexport -if [ -f /etc/environment ]; then source /etc/environment; fi -if [ -f /etc/rancher/k3s/k3s.env ]; then source /etc/rancher/k3s/k3s.env; fi -set +o allexport -``` - -Alpine Linux ------------- - -In order to pre-setup Alpine Linux you have to go through the following steps: - -```bash -echo "cgroup /sys/fs/cgroup cgroup defaults 0 0" >> /etc/fstab - -cat >> /etc/cgconfig.conf < 11s v1.13.2-k3s2 - d54c8b17c055 Ready 11s v1.13.2-k3s2 - db7a5a5a5bdd Ready 12s v1.13.2-k3s2 - -To run the agent only in Docker, use `docker-compose up node`. 
Alternatively the Docker run command can also be used; - - sudo docker run \ - -d --tmpfs /run \ - --tmpfs /var/run \ - -e K3S_URL=${SERVER_URL} \ - -e K3S_TOKEN=${NODE_TOKEN} \ - --privileged rancher/k3s:vX.Y.Z - -Air-Gap Support ---------------- - -k3s supports pre-loading of containerd images by placing them in the `images` directory for the agent before starting, for example: -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` -Images needed for a base install are provided through the releases page, additional images can be created with the `docker save` command. - -Offline Helm charts are served from the `/var/lib/rancher/k3s/server/static` directory, and Helm chart manifests may reference the static files with a `%{KUBERNETES_API}%` templated variable. For example, the default traefik manifest chart installs from `https://%{KUBERNETES_API}%/static/charts/traefik-X.Y.Z.tgz`. - -If networking is completely disabled k3s may not be able to start (ie ethernet unplugged or wifi disconnected), in which case it may be necessary to add a default route. For example: -```sh -sudo ip -c address add 192.168.123.123/24 dev eno1 -sudo ip route add default via 192.168.123.1 -``` - -k3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -Upgrades --------- - -To upgrade k3s from an older version you can re-run the installation script using the same flags, for example: - -```sh -curl -sfL https://get.k3s.io | sh - -``` - -If you want to upgrade to specific version you can run the following command: - -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - -``` - -Or to manually upgrade k3s: - -1. Download the desired version of k3s from [releases](https://github.com/rancher/k3s/releases/latest) -2. Install to an appropriate location (normally `/usr/local/bin/k3s`) -3. Stop the old version -4. Start the new version - -Restarting k3s is supported by the installation script for systemd and openrc. -To restart manually for systemd use: -```sh -sudo systemctl restart k3s -``` - -To restart manually for openrc use: -```sh -sudo service k3s restart -``` - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. Download air-gap images and install if changed -2. Install new k3s binary (from installer or manual download) -3. Restart k3s (if not restarted automatically by installer) - -Uninstalling ------------- - -If you installed k3s with the help of `install.sh` script an uninstall script is generated during installation, which will be created on your server node at `/usr/local/bin/k3s-uninstall.sh` (or as `k3s-agent-uninstall.sh`). - -Hyperkube ---------- - -k3s is bundled in a nice wrapper to remove the majority of the headache of running k8s. If -you don't want that wrapper and just want a smaller k8s distro, the releases includes -the `hyperkube` binary you can use. It's then up to you to know how to use `hyperkube`. If -you want individual binaries you will need to compile them yourself from source. diff --git a/content/k3s/latest/en/storage/_index.md b/content/k3s/latest/en/storage/_index.md new file mode 100644 index 00000000000..a567420905b --- /dev/null +++ b/content/k3s/latest/en/storage/_index.md @@ -0,0 +1,122 @@ +--- +title: "Volumes and Storage" +weight: 30 +--- + +When deploying an application that needs to retain data, you’ll need to create persistent storage. 
Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application’s pod fails. + +# Local Storage Provider +K3s comes with Rancher's Local Path Provisioner and this enables the ability to create persistent volume claims out of the box using local storage on the respective node. Below we cover a simple example. For more information please reference the official documentation [here](https://github.com/rancher/local-path-provisioner/blob/master/README.md#usage). + +Create a hostPath backed persistent volume claim and a pod to utilize it: + +### pvc.yaml + +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: local-path-pvc + namespace: default +spec: + accessModes: + - ReadWriteOnce + storageClassName: local-path + resources: + requests: + storage: 2Gi +``` + +### pod.yaml + +``` +apiVersion: v1 +kind: Pod +metadata: + name: volume-test + namespace: default +spec: + containers: + - name: volume-test + image: nginx:stable-alpine + imagePullPolicy: IfNotPresent + volumeMounts: + - name: volv + mountPath: /data + ports: + - containerPort: 80 + volumes: + - name: volv + persistentVolumeClaim: + claimName: local-path-pvc +``` + +Apply the yaml `kubectl create -f pvc.yaml` and `kubectl create -f pod.yaml` + +Confirm the PV and PVC are created. `kubectl get pv` and `kubectl get pvc` The status should be Bound for each. + +# Longhorn + +[comment]: <> (pending change - longhorn may support arm64 and armhf in the future.) + +> **Note:** At this time Longhorn only supports amd64. + +K3s supports [Longhorn](https://github.com/longhorn/longhorn). Below we cover a simple example. For more information please reference the official documentation [here](https://github.com/longhorn/longhorn/blob/master/README.md). + +Apply the longhorn.yaml to install Longhorn. + +``` +kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/longhorn.yaml +``` + +Longhorn will be installed in the namespace `longhorn-system`. + +Before we create a PVC, we will create a storage class for longhorn with this yaml. + +``` +kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/examples/storageclass.yaml +``` + +Now, apply the following yaml to create the PVC and pod with `kubectl create -f pvc.yaml` and `kubectl create -f pod.yaml` + +### pvc.yaml + +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: longhorn-volv-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: longhorn + resources: + requests: + storage: 2Gi +``` + +### pod.yaml + +``` +apiVersion: v1 +kind: Pod +metadata: + name: volume-test + namespace: default +spec: + containers: + - name: volume-test + image: nginx:stable-alpine + imagePullPolicy: IfNotPresent + volumeMounts: + - name: volv + mountPath: /data + ports: + - containerPort: 80 + volumes: + - name: volv + persistentVolumeClaim: + claimName: longhorn-volv-pvc +``` + +Confirm the PV and PVC are created. `kubectl get pv` and `kubectl get pvc` The status should be Bound for each. diff --git a/content/k3s/latest/en/upgrades/_index.md b/content/k3s/latest/en/upgrades/_index.md new file mode 100644 index 00000000000..62d3dd47cd8 --- /dev/null +++ b/content/k3s/latest/en/upgrades/_index.md @@ -0,0 +1,36 @@ +--- +title: "Upgrades" +weight: 25 +--- + +>**Note:** When upgrading, upgrade server nodes first one at a time then any worker nodes. 
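When following the note above, one way (a suggestion rather than an official requirement) to confirm each node has rejoined at the new version before moving on to the next is:

```sh
# Each node should report STATUS Ready and the upgraded VERSION
k3s kubectl get nodes
```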
+ +To upgrade K3s from an older version you can re-run the installation script using the same flags, for example: + +```sh +curl -sfL https://get.k3s.io | sh - +``` + +If you want to upgrade to specific version you can run the following command: + +```sh +curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - +``` + +Or to manually upgrade K3s: + +1. Download the desired version of K3s from [releases](https://github.com/rancher/k3s/releases/latest) +2. Install to an appropriate location (normally `/usr/local/bin/k3s`) +3. Stop the old version +4. Start the new version + +Restarting K3s is supported by the installation script for systemd and openrc. +To restart manually for systemd use: +```sh +sudo systemctl restart k3s +``` + +To restart manually for openrc use: +```sh +sudo service k3s restart +``` \ No newline at end of file diff --git a/content/os/v1.x/en/_index.md b/content/os/v1.x/en/_index.md index 258d634ed5d..1fd27ba96da 100644 --- a/content/os/v1.x/en/_index.md +++ b/content/os/v1.x/en/_index.md @@ -35,7 +35,7 @@ System Docker runs a special container called **Docker**, which is another Docke We created this separation not only for the security benefits, but also to make sure that commands like `docker rm -f $(docker ps -qa)` don't delete the entire OS. -![How it works]({{< baseurl >}}/img/os/rancheroshowitworks.png) +{{< img "/img/os/rancheroshowitworks.png" "How it works">}} ### Running RancherOS diff --git a/content/os/v1.x/en/about/security/_index.md b/content/os/v1.x/en/about/security/_index.md index 8697f09084a..a7cc30096dc 100644 --- a/content/os/v1.x/en/about/security/_index.md +++ b/content/os/v1.x/en/about/security/_index.md @@ -33,7 +33,7 @@ weight: 303 | [CVE-2017-5715](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715) | Systems with microprocessors utilizing speculative execution and indirect branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis | 6 Feb 2018 | [RancherOS v1.1.4](https://github.com/rancher/os/releases/tag/v1.1.4) using Linux v4.9.78 with the Retpoline support | | [CVE-2017-5753](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5753) | Systems with microprocessors utilizing speculative execution and branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | | [CVE-2018-8897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-8897) | A statement in the System Programming Guide of the Intel 64 and IA-32 Architectures Software Developer's Manual (SDM) was mishandled in the development of some or all operating-system kernels, resulting in unexpected behavior for #DB exceptions that are deferred by MOV SS or POP SS, as demonstrated by (for example) privilege escalation in Windows, macOS, some Xen configurations, or FreeBSD, or a Linux kernel crash. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | -| [L1 Terminal Fault](https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html) | L1 Terminal Fault is a hardware vulnerability which allows unprivileged speculative access to data which is available in the Level 1 Data Cache when the page table entry controlling the virtual address, which is used for the access, has the Present bit cleared or other reserved bits set. 
| 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | +| [CVE-2018-3620](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3620) | L1 Terminal Fault is a hardware vulnerability which allows unprivileged speculative access to data which is available in the Level 1 Data Cache when the page table entry controlling the virtual address, which is used for the access, has the Present bit cleared or other reserved bits set. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | | [CVE-2018-3639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639) | Systems with microprocessors utilizing speculative execution and speculative execution of memory reads before the addresses of all prior memory writes are known may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis, aka Speculative Store Bypass (SSB), Variant 4. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | | [CVE-2018-17182](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17182) | The vmacache_flush_all function in mm/vmacache.c mishandles sequence number overflows. An attacker can trigger a use-after-free (and possibly gain privileges) via certain thread creation, map, unmap, invalidation, and dereference operations. | 18 Oct 2018 | [RancherOS v1.4.2](https://github.com/rancher/os/releases/tag/v1.4.2) using Linux v4.14.73 | | [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736) | runc through 1.0-rc6, as used in Docker before 18.09.2 and other products, allows attackers to overwrite the host runc binary (and consequently obtain host root access) by leveraging the ability to execute a command as root within one of these types of containers: (1) a new container with an attacker-controlled image, or (2) an existing container, to which the attacker previously had write access, that can be attached with docker exec. This occurs because of file-descriptor mishandling, related to /proc/self/exe. | 12 Feb 2019 | [RancherOS v1.5.1](https://github.com/rancher/os/releases/tag/v1.5.1) | diff --git a/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md b/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md new file mode 100644 index 00000000000..13ec156209f --- /dev/null +++ b/content/os/v1.x/en/installation/configuration/date-and-timezone/_index.md @@ -0,0 +1,22 @@ +--- +title: Date and time zone +weight: 121 +--- + +The default console keeps time in the Coordinated Universal Time (UTC) zone and synchronizes clocks with the Network Time Protocol (NTP). The Network Time Protocol daemon (ntpd) is an operating system program that maintains the system time in synchronization with time servers using the NTP. + +RancherOS can run ntpd in the System Docker container. You can update its configurations by updating `/etc/ntp.conf`. For an example of how to update a file such as `/etc/ntp.conf` within a container, refer to [this page.]({{< baseurl >}}/os/v1.x/en/installation/configuration/write-files/#writing-files-in-specific-system-services) + +The default console cannot support changing the time zone because including `tzdata` (time zone data) will increase the ISO size. 
However, you can change the time zone in the container by passing a flag to specify the time zone when you run the container: + +``` +$ docker run -e TZ=Europe/Amsterdam debian:jessie date +Tue Aug 20 09:28:19 CEST 2019 +``` + +You may need to install the `tzdata` in some images: + +``` +$ docker run -e TZ=Asia/Shanghai -e DEBIAN_FRONTEND=noninteractive -it --rm ubuntu /bin/bash -c "apt-get update && apt-get install -yq tzdata && date” +Thu Aug 29 08:13:02 CST 2019 +``` diff --git a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md index 68f8389866e..697189f8d9d 100644 --- a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md +++ b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md @@ -64,7 +64,7 @@ $ USER_DOCKER_VERSION=17.03.2 make release _Available as of v1.5.0_ -When building RancherOS, you have the ability to automatically start in a supported [console]({{< baseurl >}}/os/v1.x/en/installation/switching-consoles/) instead of booting into the default console and switching to your desired one. +When building RancherOS, you have the ability to automatically start in a supported console instead of booting into the default console and switching to your desired one. Here is an example of building RancherOS and having the `alpine` console enabled: diff --git a/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md b/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md index 969fc387daa..e8886b5f617 100644 --- a/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md +++ b/content/os/v1.x/en/installation/running-rancheros/cloud/aws/_index.md @@ -25,17 +25,17 @@ Let’s walk through how to import and create a RancherOS on EC2 machine using t 1. First login to your AWS console, and go to the EC2 dashboard, click on **Launch Instance**: - ![RancherOS on AWS 1]({{< baseurl >}}/img/os/Rancher_aws1.png) + {{< img "/img/os/Rancher_aws1.png" "RancherOS on AWS 1">}} 2. Select the **Community AMIs** on the sidebar and search for **RancherOS**. Pick the latest version and click **Select**. - ![RancherOS on AWS 2]({{< baseurl >}}/img/os/Rancher_aws2.png) + {{< img "/img/os/Rancher_aws2.png" "RancherOS on AWS 2">}} 3. Go through the steps of creating the instance type through the AWS console. If you want to pass in a [cloud-config]({{< baseurl >}}/os/v1.x/en/installation/configuration/#cloud-config) file during boot of RancherOS, you'd pass in the file as **User data** by expanding the **Advanced Details** in **Step 3: Configure Instance Details**. You can pass in the data as text or as a file. - ![RancherOS on AWS 6]({{< baseurl >}}/img/os/Rancher_aws6.png) + {{< img "/img/os/Rancher_aws6.png" "RancherOS on AWS 6">}} After going through all the steps, you finally click on **Launch**, and either create a new key pair or choose an existing key pair to be used with the EC2 instance. If you have created a new key pair, download the key pair. If you have chosen an existing key pair, make sure you have the key pair accessible. Click on **Launch Instances**. - ![RancherOS on AWS 3]({{< baseurl >}}/img/os/Rancher_aws3.png) + {{< img "/img/os/Rancher_aws3.png" "RancherOS on AWS 3">}} 4. Your instance will be launching and you can click on **View Instances** to see it's status. 
- ![RancherOS on AWS 4]({{< baseurl >}}/img/os/Rancher_aws4.png) + {{< img "/img/os/Rancher_aws4.png" "RancherOS on AWS 4">}} Your instance is now running! - ![RancherOS on AWS 5]({{< baseurl >}}/img/os/Rancher_aws5.png) + {{< img "/img/os/Rancher_aws5.png" "RancherOS on AWS 5">}} ## Logging into RancherOS diff --git a/content/os/v1.x/en/installation/system-services/environment/_index.md b/content/os/v1.x/en/installation/system-services/environment/_index.md index a0d9746613c..c3990e318a9 100644 --- a/content/os/v1.x/en/installation/system-services/environment/_index.md +++ b/content/os/v1.x/en/installation/system-services/environment/_index.md @@ -3,7 +3,7 @@ title: Environment weight: 143 --- -The [environment key](https://docs.docker.com/compose/yml/#environment) can be used to customize system services. When a value is not assigned, RancherOS looks up the value from the `rancher.environment` key. +The [environment key](https://docs.docker.com/compose/compose-file/#environment) can be used to customize system services. When a value is not assigned, RancherOS looks up the value from the `rancher.environment` key. In the example below, `ETCD_DISCOVERY` will be set to `https://discovery.etcd.io/d1cd18f5ee1c1e2223aed6a1734719f7` for the `etcd` service. diff --git a/content/os/v1.x/en/overview/_index.md b/content/os/v1.x/en/overview/_index.md index 1258dfe7db9..264f130ef15 100644 --- a/content/os/v1.x/en/overview/_index.md +++ b/content/os/v1.x/en/overview/_index.md @@ -35,7 +35,7 @@ System Docker runs a special container called **Docker**, which is another Docke We created this separation not only for the security benefits, but also to make sure that commands like `docker rm -f $(docker ps -qa)` don't delete the entire OS. -![How it works]({{< baseurl >}}/img/os/rancheroshowitworks.png) +{{< img "/img/os/rancheroshowitworks.png" "How it works">}} ### Running RancherOS diff --git a/content/os/v1.x/en/quick-start-guide/_index.md b/content/os/v1.x/en/quick-start-guide/_index.md index 945ef763043..7e01e0fc0a3 100644 --- a/content/os/v1.x/en/quick-start-guide/_index.md +++ b/content/os/v1.x/en/quick-start-guide/_index.md @@ -92,7 +92,7 @@ $ sudo system-docker run -d --net=host --name busydash husseingalal/busydash ``` In the command, we used `--net=host` to tell System Docker not to containerize the container's networking, and use the host’s networking instead. After running the container, you can see the monitoring server by accessing `http://`. -![System Docker Container]({{< baseurl >}}/img/os/Rancher_busydash.png) +{{< img "/img/os/Rancher_busydash.png" "System Docker Container">}} To make the container survive during the reboots, you can create the `/opt/rancher/bin/start.sh` script, and add the Docker start line to launch the Docker at each startup. diff --git a/content/rancher/v2.x/en/admin-settings/_index.md b/content/rancher/v2.x/en/admin-settings/_index.md index bcee2952f0c..b39b1e6157e 100644 --- a/content/rancher/v2.x/en/admin-settings/_index.md +++ b/content/rancher/v2.x/en/admin-settings/_index.md @@ -42,7 +42,7 @@ Drivers in Rancher allow you to manage which providers can be used to provision For more information, see [Provisioning Drivers]({{< baseurl >}}/rancher/v2.x/en/admin-settings/drivers/). 
-## Adding Kubernetes Versions into RANCHER +## Adding Kubernetes Versions into Rancher _Available as of v2.3.0_ diff --git a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md index 037a2fc6881..6725f47f482 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/ad/_index.md @@ -74,11 +74,12 @@ The table below details the parameters for the user schema section configuration | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for user objects in your domain. | +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Username Attribute | The user attribute whose value is suitable as a display name. | | Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | | User Member Attribute | The attribute containing the groups that a user is a member of. | | Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | +| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | | User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | | Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | @@ -92,11 +93,12 @@ The table below details the parameters for the group schema configuration. | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for group objects in your domain. | +| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Name Attribute | The group attribute whose value is suitable for a display name. 
| | Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | | Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | | Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | +| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | | Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing the user's memberships. See `User Member Attribute`. | | Nested Group Membership | This setting defines whether Rancher should resolve nested group memberships. Use only if your organisation makes use of these nested memberships (i.e. you have groups that contain other groups as members). | @@ -146,7 +148,7 @@ $ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountName=jdoe`), returning the attributes for said user: -![LDAP User]({{< baseurl >}}/img/rancher/ldapsearch-user.png) +{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. @@ -179,7 +181,7 @@ $ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ This command will show us the attributes used for group objects: -![LDAP Group]({{< baseurl >}}/img/rancher/ldapsearch-group.png) +{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} Again, this allows us to determine the correct values to enter in the group schema configuration: diff --git a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md index 34f2f545d68..b4879220c29 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/azure-ad/_index.md @@ -9,6 +9,8 @@ _Available as of v2.0.3_ If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. +>**Note:** Azure AD integration only supports Service Provider initiated logins. + >**Prerequisite:** Have an instance of Azure AD configured. >**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/).
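As a rough way to sanity-check the new **Search Filter** values described in the Active Directory tables above before entering them in Rancher, you can run an `ldapsearch` query against your domain. This is only a sketch: the bind credentials, host, search base, and group names mirror the `acme.com` examples used earlier and must be replaced with your own values.

```
ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 -h ad.acme.com \
  -b "dc=acme,dc=com" \
  "(&(objectClass=group)(|(cn=group1)(cn=group2)))"
```

If the filter is syntactically valid, the matching groups are returned; an empty result is also what Rancher would see when it applies the filter.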
diff --git a/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md index e644f17a58e..b47426d3471 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/google/_index.md @@ -94,6 +94,9 @@ Using the Unique ID of the service account key, register it as an Oauth Client u 1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. 1. From the **Global** view, click **Security > Authentication** from the main menu. 1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. + 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. + 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. + 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md index a05c9709e29..822a991e3e9 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md @@ -9,57 +9,57 @@ Before configuring Rancher to support AD FS users, you must add Rancher as a [re 1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - + {{< img "/img/rancher/adfs/adfs-overview.png" "">}} 1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - + {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} 1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - + {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} 1. Select **AD FS profile** as the configuration profile for your relying party trust. - + {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} 1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. - + {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} 1. Select **Enable support for the SAML 2.0 WebSSO protocol** and enter `https:///v1-saml/adfs/saml/acs` for the service URL. 
- + {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} 1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - + {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} 1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. - + {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}} 1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. - + {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}} 1. After reviewing your settings, select **Next** to add the relying party trust. - + {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}} 1. Select **Open the Edit Claim Rules...** and click **Close**. - + {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}} 1. On the **Issuance Transform Rules** tab, click **Add Rule...**. - + {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}} 1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. - + {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}} 1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: @@ -70,7 +70,7 @@ Before configuring Rancher to support AD FS users, you must add Rancher as a [re | Token-Groups - Qualified by Long Domain Name | Group | | SAM-Account-Name | Name |
- + {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} 1. Download the `federationmetadata.xml` from your AD server at: ``` diff --git a/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md index b0af27f7f12..517cd8f6975 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/okta/_index.md @@ -7,6 +7,8 @@ _Available as of v2.2.0_ If your organization uses Okta Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. +>**Note:** Okta integration only supports Service Provider initiated logins. + ## Prerequisites In Okta, create a SAML Application with the settings below. See the [Okta documentation](https://developer.okta.com/standards/SAML/setting_up_a_saml_application_in_okta) for help. diff --git a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md index 2777c006cac..0ffa2607e3c 100644 --- a/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md +++ b/content/rancher/v2.x/en/admin-settings/authentication/openldap/_index.md @@ -75,7 +75,7 @@ The table below details the parameters for the user schema configuration. | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for user objects in your domain. | +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Username Attribute | The user attribute whose value is suitable as a display name. | | Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | | User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | @@ -93,7 +93,7 @@ The table below details the parameters for the group schema configuration. | Parameter | Description | |:--|:--| -| Object Class | The name of the object class used for group entries in your domain. | +| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | | Name Attribute | The group attribute whose value is suitable for a display name. | | Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | | Group Member Mapping Attribute | The name of the group attribute containing the members of a group. 
| diff --git a/content/rancher/v2.x/en/admin-settings/feature-flags/_index.md b/content/rancher/v2.x/en/admin-settings/feature-flags/_index.md index fc9a9dbe0c9..97baf7a4249 100644 --- a/content/rancher/v2.x/en/admin-settings/feature-flags/_index.md +++ b/content/rancher/v2.x/en/admin-settings/feature-flags/_index.md @@ -25,10 +25,16 @@ For example, if you install Rancher, then set a feature flag to true with the Ra The following is a list of the feature flags available in Rancher: -Feature | Environment Variable Key | Default Value | Description | Available as of | ----|---|---|---|--- -[Allow unsupported storage drivers]({{}}/rancher/v2.x/en/admin-settings/feature-flags/enable-not-default-storage-drivers) | `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. | v2.3.0 -[UI for Istio virtual services and destination rules]({{}}/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui) | `istio-virtual-service-ui`| `false` | Enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio | v2.3.0 +- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.x/en/admin-settings/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. +- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. + +The below table shows the availability and default value for feature flags in Rancher: + +Feature Flag Name | Default Value | Status | Available as of | +---|---|---|--- +`unsupported-storage-drivers` | `false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `true` | GA | v2.3.2 # Enabling Features when Starting Rancher @@ -38,21 +44,21 @@ When you install Rancher, enable the feature you want with a feature flag. The c {{% tabs %}} {{% tab "HA Install" %}} -When installing Rancher with a Helm chart, use the `--features` option: +When installing Rancher with a Helm chart, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: ``` helm install rancher-latest/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 ``` ### Rendering the Helm Chart for Air Gap Installations For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.x/en/installation/air-gap/install-rancher) -Here is an example of a command for passing in the feature flag options when rendering the Helm template: +Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list: ``` helm template ./rancher-.tgz --output-dir . 
\ --name rancher \ @@ -63,16 +69,16 @@ helm template ./rancher-.tgz --output-dir . \ --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 ``` {{% /tab %}} {{% tab "Single Node Install" %}} -When installing Rancher with Docker, use the `--features` option: +When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: ``` docker run -d -p 80:80 -p 443:443 \ --restart=unless-stopped \ rancher/rancher:rancher-latest \ - --features==true,=true # Available as of v2.3.0 + --features==true,=true # Available as of v2.3.0 ``` {{% /tab %}} {{% /tabs %}} diff --git a/content/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui/_index.md index 7c709b9b138..8fc6fbd2d6f 100644 --- a/content/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui/_index.md +++ b/content/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui/_index.md @@ -4,13 +4,16 @@ weight: 2 --- _Available as of v2.3.0_ +This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. + > **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup) in order to use the feature. To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.x/en/admin-settings/feature-flags) -Environment Variable Key | Default Value | Description ----|---|--- -`istio-virtual-service-ui`| `false` | Enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio +Environment Variable Key | Default Value | Status | Available as of +---|---|---|--- +`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `true` | GA | v2.3.2 # About this Feature diff --git a/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md b/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md index 59449404d16..8bccf7c9c87 100644 --- a/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md +++ b/content/rancher/v2.x/en/admin-settings/k8s-metadata/_index.md @@ -7,7 +7,7 @@ _Available as of v2.3.0_ The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. -**Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. +> **Note:** The Kubernetes API can change between minor versions. 
Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. @@ -27,13 +27,13 @@ Administrators might configure the RKE metadata settings to do the following: - Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub - Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher -# Refresh Kubernetes Metadata +### Refresh Kubernetes Metadata The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. -# Configuring the Metadata Synchronization +### Configuring the Metadata Synchronization > Only administrators can change these settings. @@ -53,7 +53,7 @@ If you don't have an air gap setup, you don't need to specify the URL or Git bra However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL and Git branch in the `rke-metadata-config` settings to point to the new location of the repository. -# Air Gap Setups +### Air Gap Setups Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) diff --git a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md index d3e61d8b2ea..91ea1123625 100644 --- a/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rbac/locked-roles/_index.md @@ -27,7 +27,7 @@ If you want to prevent a role from being assigned to users, you can set it to a You can lock roles in two contexts: -- When you're [adding a custom role](({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). +- When you're [adding a custom role]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/default-custom-roles/). - When you editing an existing role (see below). 1. From the **Global** view, select **Security** > **Roles**. 
diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md index eccaf4c283f..4f686c0222a 100644 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/enforcement/_index.md @@ -22,7 +22,7 @@ You might want to require new clusters to use a template to ensure that any clus To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: 1. From the **Global** view, click the **Settings** tab. -1. Go to the `rke-template-enforcement` setting. Click the vertical **Ellipsis (...)** and click **Edit.** +1. Go to the `cluster-template-enforcement` setting. Click the vertical **Ellipsis (...)** and click **Edit.** 1. Set the value to **True** and click **Save.** **Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. @@ -32,7 +32,7 @@ To require new clusters to use an RKE template, administrators can turn on RKE t To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: 1. From the **Global** view, click the **Settings** tab. -1. Go to the `rke-template-enforcement` setting. Click the vertical **Ellipsis (...)** and click **Edit.** +1. Go to the `cluster-template-enforcement` setting. Click the vertical **Ellipsis (...)** and click **Edit.** 1. Set the value to **False** and click **Save.** -**Result:** When clusters are provisioned by Rancher, they don't need to use a template. \ No newline at end of file +**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md b/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md index 4fbe8a08a3c..3c85e86d616 100644 --- a/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md +++ b/content/rancher/v2.x/en/admin-settings/rke-templates/example-yaml/_index.md @@ -12,8 +12,21 @@ The YAML in the RKE template uses the same customization that is used when you c # Cluster Config # docker_root_dir: /var/lib/docker + enable_cluster_alerting: false -enable_cluster_monitoring: false +# This setting is not enforced. Clusters +# created with this sample template +# would have alerting turned off by default, +# but end users could still turn alerting +# on or off. + +enable_cluster_monitoring: true +# This setting is not enforced. Clusters +# created with this sample template +# would have monitoring turned on +# by default, but end users could still +# turn monitoring on or off. + enable_network_policy: false local_cluster_auth_endpoint: enabled: true diff --git a/content/rancher/v2.x/en/best-practices/containers/_index.md b/content/rancher/v2.x/en/best-practices/containers/_index.md index c526e79b73a..ce67e87ef6d 100644 --- a/content/rancher/v2.x/en/best-practices/containers/_index.md +++ b/content/rancher/v2.x/en/best-practices/containers/_index.md @@ -32,7 +32,7 @@ When possible, use a non-privileged user when running processes within your cont ### Define Resource Limits Apply CPU and memory limits to your pods. This can help manage the resources on your worker nodes and avoid a malfunctioning microservice from impacting other microservices. -In standard Kubernetes, you can set resource limits on the namespace level. 
In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the [Rancher docs]({{}}rancher/v2.x/en/project-admin/resource-quotas/). +In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the [Rancher docs]({{}}/rancher/v2.x/en/project-admin/resource-quotas/). When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project or namespace, all containers will require a respective CPU or Memory field set during creation. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. diff --git a/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md b/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md index 9a8a6bf5b19..a42e84284de 100644 --- a/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md +++ b/content/rancher/v2.x/en/best-practices/deployment-strategies/_index.md @@ -13,7 +13,7 @@ There are two recommended deployment strategies. Each one has its own pros and c In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run in an HA (high-availability) configuration, and there would be impact due to latencies. -![Hub and Spoke Deployment]({{< baseurl >}}/img/rancher/bpg/hub-and-spoke.png) +{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} ### Pros @@ -30,7 +30,7 @@ In this deployment scenario, there is a single Rancher control plane managing Ku --- In the regional deployment model a control plane is deployed in close proximity to the compute nodes. -![Regional Deployment]({{< baseurl >}}/img/rancher/bpg/regional.png) +{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} ### Pros diff --git a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md index 83f6361e8e9..b097e8b7b12 100644 --- a/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/cloning-clusters/_index.md @@ -53,6 +53,8 @@ Begin by using Rancher CLI to export the configuration for the cluster that you Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. +> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + 1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. 
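To make the nesting called out in the note above concrete, here is a minimal sketch of how an exported `cluster-template.yml` might be structured in Rancher v2.3.0+. The specific RKE fields shown (`network.plugin`, `services.etcd.snapshot`) are illustrative only; refer to the cluster configuration options documentation for the full schema.

```
docker_root_dir: /var/lib/docker
enable_cluster_monitoring: false
# RKE-specific directives are nested under rancher_kubernetes_engine_config
rancher_kubernetes_engine_config:
  network:
    plugin: canal
  services:
    etcd:
      snapshot: true
```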
diff --git a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md index 206a3779a36..4eb7e29a741 100644 --- a/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/editing-clusters/_index.md @@ -61,9 +61,11 @@ When editing clusters, clusters that are [launched using RKE]({{< baseurl >}}/ra ### Upgrading Kubernetes -Following an upgrade to the latest version of Rancher, you can update your existing clusters to use the latest supported version of Kubernetes. Before a new version of Rancher is released, it's tested with the latest versions of Kubernetes to ensure compatibility. +Following an upgrade to the latest version of Rancher, you can update your existing clusters to use the latest supported version of Kubernetes. -As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows you to use newer Kubernetes versions as soon as they are released, without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata) +Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For example, Rancher v2.3.0 was tested with Kubernetes v1.15.4, v1.14.7, and v1.13.11. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.3.0/) + +As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.x/en/admin-settings/k8s-metadata) >**Recommended:** Before upgrading Kubernetes, [backup your cluster]({{< baseurl >}}/rancher/v2.x/en/backups). diff --git a/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md index 63ad50061d8..13277b3fbc4 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts/_index.md @@ -54,9 +54,4 @@ Alerts can be triggered based on node metrics. Each computing resource in a Kube | Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | # Project-level Alerts -When you enable monitoring for the project, some project-level alerts are provided. - -| Alert | Explanation | |-------|-------------| -| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | -| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | \ No newline at end of file +When you enable monitoring for the project, some project-level alerts are provided. 
For details, refer to the [section on project-level alerts.]({{}}/rancher/v2.x/en/project-admin/tools/alerts/#default-project-level-alerts) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md index 8beaf13b318..da1fbcacc7a 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/_index.md @@ -14,7 +14,7 @@ If you use Istio for traffic management, you will need to allow external traffic 1. [Enable Istio in the cluster.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster) 1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) 1. [Select the nodes where the main Istio components will be deployed.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) -1. [Add deployments and services that have the Istio sidecar injected.](#deploy-workloads-in-the-cluster) +1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads) 1. [Set up the Istio gateway. ]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) 1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) 1. [Generate traffic and see Istio in action.](#generate-traffic-and-see-istio-in-action) @@ -23,6 +23,6 @@ If you use Istio for traffic management, you will need to allow external traffic This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.x/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.x/en/cluster-provisioning) on which you will install Istio. -The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.x/en/cluster-admin/istio/#cpu-and-memory-requirements) +The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/resources/) -The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) \ No newline at end of file +The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md index b0679a6ca1d..38bb20f588a 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md @@ -15,9 +15,10 @@ Wait a few minutes for the workload to upgrade to have the istio sidecar. Click Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. -1. Go to the cluster view and click **Import YAML.** +1. Go to the project inside the cluster you want to deploy the workload on. +1. In Workloads, click **Import YAML.** 1. Copy the below resources into the form. -1. Click **Import.** +1. 
Click **Import.** This will set up the following sample resources from Istio's example BookInfo app: @@ -318,4 +319,4 @@ spec: --- ``` -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) \ No newline at end of file +### [Next: Set up the Istio Gateway]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md index 96b5d2590e0..8d6d64b099b 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md @@ -7,7 +7,7 @@ This cluster uses the default Nginx controller to allow traffic into the cluster A Rancher [administrator]({{}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. -1. From the **Global** view, navigate to the cluster where you want to enable Istio. +1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. 1. Click **Tools > Istio.** 1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/config/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. 1. Click **Enable**. @@ -19,4 +19,4 @@ The Istio application, `cluster-istio`, is added as an [application]({{ When Istio is enabled in the cluster, the label for Istio sidecar auto injection, `istio-injection=enabled`, will be automatically added to each new namespace in this cluster. This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. -### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) \ No newline at end of file +### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md index d99e00661f5..948d15c7c05 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md @@ -21,4 +21,25 @@ This namespace setting will only affect new workloads in the namespace. Any pree To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. +### Excluding Workloads from Being Injected with the Istio Sidecar + +If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: + +``` +sidecar.istio.io/inject: "false" +``` + +To add the annotation to a workload: + +1. From the **Global** view, open the project that has the workload that should not have the sidecar. +1. Click **Resources > Workloads.** +1. Go to the workload that should not have the sidecar and click **Ellipsis (...) > Edit.** +1. 
Click **Show Advanced Options.** Then expand the **Labels & Annotations** section. +1. Click **Add Annotation.** +1. In the **Key** field, enter `sidecar.istio.io/inject`. +1. In the **Value** field, enter `false`. +1. Click **Save.** + +**Result:** The Istio sidecar will not be injected into the workload. + ### [Next: Set up Taints and Tolerations]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md index 884882b9cf8..771af212486 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/istio/setup/gateway/_index.md @@ -53,13 +53,40 @@ spec: protocol: HTTP hosts: - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage + port: + number: 9080 ``` **Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. Confirm that the resource exists by running: ``` -kubectl get gateway +kubectl get gateway -A ``` The result should be something like this: @@ -100,4 +127,4 @@ In the gateway resource, the selector refers to Istio's default ingress controll 1. Within `istio-system`, there is a workload named `istio-ingressgateway`. 1. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. -### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) \ No newline at end of file +### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/setup/set-up-traffic-management) diff --git a/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md index f6f0a6acae1..e5aa8f8482f 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/logging/syslog/_index.md @@ -21,6 +21,8 @@ If you are using rsyslog, please make sure your rsyslog authentication mode is ` 1. Select a **Log Severity** for events that are logged to the Syslog server. For more information on each severity level, see the [Syslog protocol documentation](https://tools.ietf.org/html/rfc5424#page-11). + - Specifying a **Log Severity** does not mean that it will act as a filtering mechanism for logs. To filter logs, you should use a parser on the Syslog server. + ## Encryption Configuration If your Syslog server is using **TCP** protocol and uses TLS, you need to select **Use TLS** and complete the **Encryption Configuration** form. 
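Once the syslog endpoint (and TLS, if used) is configured, it can be helpful to confirm that the server is reachable and accepting messages at the expected severity. The following is only a sketch using the `logger` utility from util-linux; the hostname and port are placeholders for your own syslog server.

```
# Send a single test message over TCP at "warning" severity.
logger -n syslog.example.com -P 514 -T -p user.warning "Rancher syslog connectivity test"
```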
diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md index 256bfa306eb..14c797848cf 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/cluster-metrics/_index.md @@ -101,7 +101,7 @@ Workload metrics display the hardware utilization for a Kubernetes workload. You 1. From the **Global** view, navigate to the project that you want to view workload metrics. -1. Select **Workloads > Workloads** in the navigation bar. +1. From the main navigation bar, choose **Resources > Workloads.** In versions prior to v2.3.0, choose **Workloads** on the main navigation bar. 1. Select a specific workload and click on its name. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md index 6880c8aec52..343968af833 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/monitoring/viewing-metrics/_index.md @@ -15,9 +15,9 @@ Rancher's dashboards are available at multiple locations: - **Cluster Dashboard**: From the **Global** view, navigate to the cluster. - **Node Metrics**: From the **Global** view, navigate to the cluster. Select **Nodes**. Find the individual node and click on its name. Click **Node Metrics.** -- **Workload Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Click **Workload Metrics.** +- **Workload Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Click **Workload Metrics.** - **Pod Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Click **Pod Metrics.** -- **Container Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** +- **Container Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** Prometheus metrics are displayed and are denoted with the Grafana icon. If you click on the icon, the metrics will open a new tab in Grafana. diff --git a/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md b/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md index aec464ab46e..59a82734bd9 100644 --- a/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/tools/notifiers/_index.md @@ -65,6 +65,7 @@ _Available as of v2.2.0_ 1. 
Select the **Recipient Type** and then enter a corresponding ID in the **Default Recipient** field, for example, the party ID, tag ID, or user account that you want to receive the notification. You can get contact information from the [Contacts page](https://work.weixin.qq.com/wework_admin/frame#contacts). {{% /accordion %}} +1. _Available as of v2.3.0_ - Select **Enable** for **Send Resolved Alerts** if you wish to be notified about resolved alerts. 1. Click **Add** to complete adding the notifier. **Result:** Your notifier is added to Rancher. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md index 0032cecead6..e8dea8adda3 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/_index.md @@ -189,6 +189,33 @@ services: - "/sbin/iscsiadm:/sbin/iscsiadm" ``` +## GlusterFS Volumes With Rancher Launched Kubernetes Clusters + +In [Rancher Launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The `kubelet` log will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. There are two requirements before you can change the cluster configuration: + +- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) +- The `systemd-run` binary needs to be compatible with the Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) + +``` +docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version +``` + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +``` +services: + kubelet: + extra_binds: + - "/usr/bin/systemd-run:/usr/bin/systemd-run" +``` + +After the cluster has finished provisioning, you can check the `kubelet` container log to see if the functionality is activated by looking for the following log line: + +``` +Detected OS with systemd +``` ## What's Next? diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md index ee192cf9b17..4f811564629 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md @@ -23,7 +23,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 3. Enter a **Name** for the class. 4. Under **Provisioner**, select **VMWare vSphere Volume**. - ![vsphere-storage-class]({{< baseurl >}}/img/rancher/vsphere-storage-class.png) + {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} 5. 
Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. 5. Click **Save**. @@ -37,7 +37,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. 6. Enter the required **Capacity** for the volume. Then click **Define**. - ![workload-add-volume]({{< baseurl >}}/img/rancher/workload-add-volume.png) + {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} 7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. 8. Click **Launch** to create the workload. diff --git a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md index 23f3a45aabb..0df11a21405 100644 --- a/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md +++ b/content/rancher/v2.x/en/cluster-admin/volumes-and-storage/persistent-volume-claims/_index.md @@ -10,14 +10,14 @@ _Persistent Volume Claims_ (or PVCs) are objects that request storage resources - Rancher lets you create as many PVCs within a project as you'd like. - You can mount PVCs to a deployment as you create it, or later after its running. -- Each Rancher project contains a list of PVCs that you've created, available from the **Volumes** tab. You can reuse these PVCs when creating deployments in the future. +- Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** (In versions prior to v2.3.0, the PVCs are in the **Volumes** tab.) You can reuse these PVCs when creating deployments in the future. >**Prerequisite:** > You must have a pre-provisioned [persistent volume]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-a-persistent-volume) available for use, or you must have a [storage class created]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/#adding-storage-classes) that dynamically creates a volume upon request from the workload. 1. From the **Global** view, open the project containing a workload that you want to add a PVC to. -1. From the main menu, make sure that **Workloads** is selected. Then select the **Volumes** tab. Click **Add Volume**. +1. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Then select the **Volumes** tab. Click **Add Volume**. 1. Enter a **Name** for the volume claim. diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md index 1a993231f66..45c2dfa3e48 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md @@ -50,6 +50,8 @@ Huawei CCE service doesn't support the ability to create clusters with public ac | Cluster Label | The labels for the cluster. | | Highway Subnet | This option is only supported in `BareMetal` type. 
It requires you to select a VPC with high network speed for the bare metal machines. | +> **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + 7. Fill the following node configuration of the cluster: |Settings|Description| diff --git a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md index ad9c0b4bbde..9899c63be38 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md @@ -48,6 +48,8 @@ You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster, you may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Default to 172.16.0.0/16. | +> Note: If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + 7. Click `Next: Select Instance Type` to choose the instance type that will use for your TKE cluster. | Option | Description | diff --git a/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md new file mode 100644 index 00000000000..cf5e5cddd61 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/node-requirements/_index.md @@ -0,0 +1,160 @@ +--- +title: Node Requirements for User Clusters +weight: 1 +--- + +This page describes the requirements for the nodes where your apps and services will be installed. + +In this section, "user cluster" refers to a cluster running your apps, which should be separate from the cluster (or single node) running Rancher. + +> It is important to note that if Rancher is installed on a high-availability Kubernetes cluster, the Rancher server cluster and user clusters have the same requirements for OS and Docker, but other requirements are different. 
For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.x/en/installation/requirements/) + +Make sure the nodes in your user clusters fulfill the following requirements: + +- Operating systems and Docker requirements - same as the [requirements for Rancher installation]({{}}/rancher/v2.x/en/installation/requirements/#operating-systems-and-docker-requirements) +- [Hardware Requirements](#hardware-requirements) +- [Networking Requirements](#networking-requirements) + +# Operating Systems and Docker Requirements + +For the nodes in user clusters, the requirements for the operating system and Docker version are the same as the [OS and Docker requirements for the Rancher server cluster.]({{}}/rancher/v2.x/en/installation/requirements/#operating-systems-and-docker-requirements) + +# Hardware Requirements + +The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. + +Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) be hosted on different nodes so that they can scale separately from each other. + +For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) + +For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) + +# Networking Requirements + +For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. + +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below lists the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.x/en/cluster-provisioning/#cluster-creation-options). + +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) + +Details on which ports are used in each situation are found in the following sections: + +- [Commonly used ports](#commonly-used-ports) +- [Port requirements for custom clusters](#port-requirements-for-custom-clusters) +- [Port requirements for clusters hosted by an infrastructure provider](#port-requirements-for-clusters-hosted-by-an-infrastructure-provider) + - [Security group for nodes on AWS EC2](#security-group-for-nodes-on-aws-ec2) +- [Port requirements for clusters hosted by a Kubernetes provider](#port-requirements-for-clusters-hosted-by-a-kubernetes-provider) +- [Port requirements for imported clusters](#port-requirements-for-imported-clusters) +- [Port requirements for local traffic](#port-requirements-for-local-traffic) + +### Commonly Used Ports + +If security isn't a large concern and you're okay with opening a few additional ports, you can use this table as your port reference instead of the comprehensive tables in the following sections. + +These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. + +{{% accordion id="common-ports" label="Click to Expand" %}} + +
Commonly Used Ports Reference
+ +| Protocol | Port | Description | +|:--------: |:----------------: |------------------------------------------------- | +| TCP | 22 | Node driver SSH provisioning | +| TCP | 2376 | Node driver Docker daemon TLS port | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 6783 | Weave Port | +| UDP | 6783-6784 | Weave UDP Ports | +| TCP | 10250 | kubelet API | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | +| TCP/UDP | 30000-
32767 | NodePort port range | + +{{% /accordion %}} + +### Port Requirements for Custom Clusters + +If you are launching a Kubernetes cluster on your existing infrastructure, refer to these port requirements. + +{{% accordion id="port-reqs-for-custom-clusters" label="Click to Expand" %}} + +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with [custom nodes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/). + +{{< ports-custom-nodes >}} + +{{% /accordion %}} + +### Port Requirements for Clusters Hosted by an Infrastructure Provider + +If you are launching a Kubernetes cluster on nodes that are in an infrastructure provider such as Amazon EC2, Google Container Engine, DigitalOcean, Azure, or vSphere, [these port requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/port-reqs-for-infrastructure-provider) apply. + +These required ports are automatically opened by Rancher during creation of clusters using cloud providers. + +{{% accordion id="port-reqs-for-infrastructure-providers" label="Click to Expand" %}} + +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/). + +>**Note:** +>The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. + +{{< ports-iaas-nodes >}} + +{{% /accordion %}} + +#### Security Group for Nodes on AWS EC2 + +When using the [AWS EC2 node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. + +| Type | Protocol | Port Range | Source/Destination | Rule Type | +|-----------------|:--------:|:-----------:|------------------------|:---------:| +| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | +| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | +| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | +| All traffic | All | All | 0.0.0.0/0 | Outbound | + +### Port Requirements for Clusters Hosted by a Kubernetes Provider + +If you are launching a cluster with a hosted Kubernetes provider such as Google Kubernetes Engine, Amazon EKS, or Azure Kubernetes Service, refer to these port requirements. + +{{% accordion id="port-reqs-for-hosted-kubernetes" label="Click to Expand" %}} + +The following table depicts the port requirements for nodes in [hosted Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters).
+ +{{< ports-imported-hosted >}} + +{{% /accordion %}} + +### Port Requirements for Imported Clusters + +If you are importing an existing cluster, refer to these port requirements. + +{{% accordion id="port-reqs-for-imported-clusters" label="Click to Expand" %}} + +The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.x/en/cluster-provisioning/imported-clusters/). + +{{< ports-imported-hosted >}} + +{{% /accordion %}} + +### Port Requirements for Local Traffic + +Ports marked as `local traffic` (i.e., `9099 TCP`) in the port requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). +These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. + +However, this traffic may be blocked when: + +- You have applied strict host firewall policies on the node. +- You are using nodes that have multiple interfaces (multihomed). + +In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes/instances. \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md index 7711b93834d..dda1ab6e072 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/production/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/production/_index.md @@ -1,137 +1,49 @@ --- -title: Production Ready Cluster +title: Checklist for Production-Ready Clusters weight: 2005 --- -While Rancher makes it easy to create Kubernetes clusters, a production ready cluster takes more consideration and planning. There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. In the next sections each of the roles will be described in more detail. +In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. -When designing your cluster(s), you have two options: +For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements) -* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/). -* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. +This is a shortlist of best practices that we strongly recommend for all production clusters. ->**Note:** Do not add the `worker` role to any node configured with either the `etcd` or `controlplane` role. This will make the nodes schedulable for regular workloads, which could interfere with critical cluster components running on the nodes with the `etcd` or `controlplane` role. 
+For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices) -## etcd +### Node Requirements -Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. +* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) including the port requirements. ->**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. +### Back up etcd -### Hardware Requirements +* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{}}/rancher/v2.x/en/backups/backups/ha-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. -Please see [Kubernetes: Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/) and [etcd: Hardware Recommendations](https://coreos.com/etcd/docs/latest/op-guide/hardware.html) for the hardware requirements. - -### Count of etcd Nodes - -The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones to survive the loss of one availability zone within a region. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. - -| Nodes with `etcd` role | Majority | Failure Tolerance | -|--------------|------------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | **1** | -| 4 | 3 | 1 | -| 5 | 3 | **2** | -| 6 | 4 | 2 | -| 7 | 4 | **3** | -| 8 | 5 | 3 | -| 9 | 5 | **4** | - -References: - -* [etcd cluster size](https://coreos.com/etcd/docs/latest/v2/admin_guide.html#optimal-cluster-size) -* [Operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) - -### Network Latency - -Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These settings allow etcd to run in most networks (except really high latency networks). - -References: - -* [etcd Tuning](https://coreos.com/etcd/docs/latest/tuning.html) - -### Backups - -etcd is the location where the state of your cluster is stored. Losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. - -## controlplane - -Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. 
- ->**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -References: - -* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) - -### Hardware Requirements - -Please see [Kubernetes: Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/) for the hardware requirements. - -### Count of controlplane Nodes - -Adding more than one node with the `controlplane` role makes every master component highly available. See below for a breakdown of how high availability is achieved per component. - -#### kube-apiserver - -The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. - -#### kube-controller-manager - -The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -#### kube-scheduler - -The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -## worker - -Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. - -References: - -* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) - -### Hardware Requirements - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -### Count of worker Nodes - -Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. - -## Networking - -Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). - -## Cluster Diagram - -This diagram is applicable to Kubernetes clusters built using RKE or [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). - -![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -## Production checklist +### Cluster Architecture * Nodes should have one of the following role configurations: * `etcd` * `controlplane` * `etcd` and `controlplane` * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) -* Network traffic is only strictly allowed according to [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/). * Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. * Assign two or more nodes the `controlplane` role for master component high availability. * Assign two or more nodes the `worker` role for workload rescheduling upon node failure. -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. -* Perform load tests on your cluster to verify that its hardware can support your workloads. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) + +For more information about the recommended number of nodes for each Kubernetes role, refer to the [section on recommended architecture.]({{}}/rancher/v2.x/en/cluster/provisioning/recommended-architecture) + +### Logging and Monitoring + * Configure alerts/notifiers for Kubernetes components (System Service). * Configure logging for cluster analysis and post-mortems. -## RKE cluster running Rancher HA +### Reliability -You may have noticed that our [High Availability (HA) Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, as: +* Perform load tests on your cluster to verify that its hardware can support your workloads. -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. +### Networking + +* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). +* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). 
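To make the etcd backup recommendation above concrete, the sketch below shows roughly what recurring snapshots shipped to S3 might look like in a cluster config file. The bucket, folder, region, and credentials are placeholders, and the exact `backup_config`/`s3backupconfig` fields should be verified against the RKE documentation for your version; in Rancher v2.3.0+ this block is nested under the `rancher_kubernetes_engine_config` directive.

```yaml
rancher_kubernetes_engine_config:
  services:
    etcd:
      backup_config:
        enabled: true        # turn on recurring snapshots
        interval_hours: 12   # take a snapshot every 12 hours
        retention: 6         # keep the 6 most recent snapshots
        # Hypothetical S3 target so snapshots are also stored off the node
        s3backupconfig:
          access_key: "<S3_ACCESS_KEY>"
          secret_key: "<S3_SECRET_KEY>"
          bucket_name: "my-etcd-snapshots"
          folder: "production-cluster"
          region: "us-east-1"
          endpoint: "s3.amazonaws.com"
```

After enabling snapshots, verify in the Rancher UI or in the S3 bucket that they are actually being created before relying on them for disaster recovery.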
\ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md new file mode 100644 index 00000000000..59a68d37a81 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/_index.md @@ -0,0 +1,43 @@ +--- +title: Roles for Nodes in Kubernetes +weight: 1 +--- + +This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. + +This diagram is applicable to Kubernetes clusters built using RKE or [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/). + +![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely for visual aid + +# etcd + +Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. + +>**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +# controlplane + +Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. + +>**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +### kube-apiserver + +The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. + +### kube-controller-manager + +The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +### kube-scheduler + +The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +# worker + +Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. + +# References + +* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md b/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md new file mode 100644 index 00000000000..1925a794cc0 --- /dev/null +++ b/content/rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/_index.md @@ -0,0 +1,74 @@ +--- +title: Recommended Cluster Architecture +weight: 1 +--- + +There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. + +# Separating Worker Nodes from Nodes with Other Roles + +When designing your cluster(s), you have two options: + +* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/). +* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. 
+ +In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. + +Therefore, each node should have one of the following role configurations: + + * `etcd` + * `controlplane` + * Both `etcd` and `controlplane` + * `worker` + +# Recommended Number of Nodes with Each Role + +The cluster should have: + +- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +- At least two nodes with the role `controlplane` for master component high availability. +- At least two nodes with the role `worker` for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles) + + +### Number of Controlplane Nodes + +Adding more than one node with the `controlplane` role makes every master component highly available. + +### Number of etcd Nodes + +The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. + +| Nodes with `etcd` role | Majority | Failure Tolerance | +|--------------|------------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | **1** | +| 4 | 3 | 1 | +| 5 | 3 | **2** | +| 6 | 4 | 2 | +| 7 | 4 | **3** | +| 8 | 5 | 3 | +| 9 | 5 | **4** | + +References: + +* [Official etcd documentation on optimal etcd cluster size](https://github.com/bmizerany/etcd-team/blob/master/Documentation/optimal-cluster-size.md) +* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) + +### Number of Worker Nodes + +Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. + +### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications + +You may have noticed that our [High Availability (HA) Install]({{}}/rancher/v2.x/en/installation/ha/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. 
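As a rough illustration of the role counts recommended above, the sketch below shows a hypothetical `nodes` section for an RKE-built cluster with three dedicated `etcd` nodes, two `controlplane` nodes, and two `worker` nodes. The addresses and SSH user are placeholders; for clusters created through Rancher, the same layout is expressed through node pools or node role options rather than a `nodes` list.

```yaml
nodes:
  # Three etcd nodes, ideally spread across three availability zones
  - address: 172.16.1.10
    user: ubuntu
    role: [etcd]
  - address: 172.16.2.10
    user: ubuntu
    role: [etcd]
  - address: 172.16.3.10
    user: ubuntu
    role: [etcd]
  # Two controlplane nodes for a highly available Kubernetes API
  - address: 172.16.1.20
    user: ubuntu
    role: [controlplane]
  - address: 172.16.2.20
    user: ubuntu
    role: [controlplane]
  # Two or more worker nodes so workloads can be rescheduled on node failure
  - address: 172.16.1.30
    user: ubuntu
    role: [worker]
  - address: 172.16.2.30
    user: ubuntu
    role: [worker]
```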
+ +# References + +* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rancher-agents/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rancher-agents/_index.md index c54329de49f..c33c0e85b48 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rancher-agents/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rancher-agents/_index.md @@ -14,7 +14,7 @@ The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher ### cattle-node-agent -The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernets API of [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. > **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. @@ -27,8 +27,7 @@ _Applies to v2.3.0 and higher_ | `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | | `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `requiredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs -concepts/configuration/assign-pod-node/) to find more information about scheduling rules. +The `cattle-cluster-agent` Deployment has preferred scheduling rules using `requiredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. 
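For readers unfamiliar with the field, a `requiredDuringSchedulingIgnoredDuringExecution` rule in a pod spec has roughly the shape sketched below. The label key and value here are placeholders only; the actual expressions Rancher applies to the agent are listed in the table that follows.

```yaml
# Generic shape of a node affinity rule (placeholder labels, not Rancher's actual values)
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: example.com/node-role   # placeholder label key
              operator: In
              values:
                - controlplane             # placeholder label value
```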
The `requiredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md index 5722a01b0a6..fa9f9b61084 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md @@ -10,9 +10,10 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. ## Prerequisites - AWS EC2 Access Key and Secret key that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- IAM Policy created to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our two example JSON policies below: +- IAM Policy created to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - [Example IAM Policy](#example-iam-policy) - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) + - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) - IAM Policy added as Permission to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. @@ -39,7 +40,7 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. {{< step_create-cloud-credential >}} - **Zone and Network** configures the availability zone and network settings for your cluster. - - **Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{< baseurl >}}/rancher/v2.x/en/installation/references/#amazonec2-securitygroup-nodedriver) to see what rules are created in the `rancher-nodes` Security Group. + - **Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#amazonec2-securitygroup-nodedriver) to see what rules are created in the `rancher-nodes` Security Group. - **Instance** configures the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI.

If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. @@ -157,3 +158,45 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. ] } ``` +### Example IAM Policy to allow encrypted EBS volumes +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Encrypt", + "kms:DescribeKey", + "kms:CreateGrant", + "ec2:DetachVolume", + "ec2:AttachVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", + "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Resource": "*" + } + ] +} +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md index 7e593486172..8c3cfafb88e 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/_index.md @@ -1,14 +1,14 @@ --- title: Cluster Options weight: 2250 ---- +--- As you configure a new cluster that's provisioned using [RKE]({{< baseurl >}}/rke/latest/en/), you can choose custom Kubernetes options. You can configure Kubernetes options one of two ways: - [Rancher UI](#rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. -- [Config File](#config-file): Alternatively, you can create a [RKE config file]({{< baseurl >}}/rke/latest/en/config-options/) to customize any option offered by Kubernetes. +- [Config File](#config-file): The cluster config file allows you to use any option offered by Kubernetes by specifying them in YAML. In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. ## Rancher UI @@ -58,13 +58,20 @@ If you want to see all the configuration options for a cluster, please click **S _Available as of v2.2.0_ -If you are using a private registry with authentication for your Docker images, please configure the registry in this section to allow the nodes to pull images from this registry. See [Private Registries]({{< baseurl >}}/rke/latest/en/config-options/private-registries/) for more information. +The registry configuration here is applied during the provisioning of the cluster. 
This option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images.]({{}}/rke/latest/en/config-options/add-ons/) + +- **System images** are components needed to maintain the Kubernetes cluster. +- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. + +To deploy workloads that pull images from a private registry, you will need to [set up your own Kubernetes registry]({{}}/rancher/v2.x/en/k8s-in-rancher/registries/) for your project. + +See the [RKE documentation on private registries]({{< baseurl >}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. ### Authorized Cluster Endpoint _Available as of v2.2.0_ -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. This is enabled by default, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. It is recommended to create an FQDN pointing to a load balancer which load balances across your nodes with the `controlplane` role. If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate which will be included in the generated kubeconfig to validate the certificate chain. See the [Kubeconfig Files]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) and [API Keys]({{< baseurl >}}/v2.x/en/user-settings/api-keys/#creating-an-api-key) documentation for more information. +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. This is enabled by default, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. It is recommended to create an FQDN pointing to a load balancer which load balances across your nodes with the `controlplane` role. If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate which will be included in the generated kubeconfig to validate the certificate chain. See the [Kubeconfig Files]({{}}/rancher/v2.x/en/k8s-in-rancher/kubeconfig/) and [API Keys]({{}}/rancher/v2.x/en/user-settings/api-keys/#creating-an-api-key) documentation for more information. ### Advanced Cluster Options @@ -107,7 +114,188 @@ Instead of using the Rancher UI to choose Kubernetes options for the cluster, ad ![image]({{< baseurl >}}/img/rancher/cluster-options-yaml.png) -For an example of RKE config file syntax, see the [RKE documentation]({{< baseurl >}}/rke/latest/en/example-yamls/). +The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. + +### Config File Structure in Rancher v2.3.0+ + +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. 
Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. + +{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.3.0+" %}} + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` +{{% /accordion %}} + +### Config File Structure in Rancher v2.0.0-v2.2.x + +An example cluster config file is included below. + +{{% accordion id="prior-to-v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.0.0-v2.2.x" %}} +```yaml +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. 
+# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.15.3-rancher3-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 +ssh_agent_auth: false +``` +{{% /accordion %}} ### Default DNS provider @@ -149,3 +337,16 @@ local_cluster_auth_endpoint: fqdn: "FQDN" ca_certs: "BASE64_CACERT" ``` + +### Custom Network Plug-in + +_Available as of v2.2.4_ + +You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. + +There are two ways that you can specify an add-on: + +- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) + +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) \ No newline at end of file diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md index f1d67ca9a52..9042b9c9857 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md @@ -5,28 +5,30 @@ weight: 2240 _Available as of v2.3.0_ - When provisioning a [custom cluster]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/custom-clusters/) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes custom cluster on your existing infrastructure. You can use a mix of Linux and Windows hosts as your cluster nodes. Windows nodes can only be used for deploying workloads, while Linux nodes are required for cluster management. You can only add Windows nodes to a cluster if Windows support is enabled. Windows support can be enabled for new custom clusters that use Kubernetes 1.15+ and the Flannel network provider. Windows support cannot be enabled for existing clusters. +> Windows clusters have more requirements than Linux clusters. 
For example, Windows nodes must have 50 GB of disk space. Make sure your Windows cluster fulfills all of the [requirements.](#requirements-for-windows-clusters) + For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). This guide covers the following topics: + - [Prerequisites](#prerequisites) - [Requirements](#requirements-for-windows-clusters) - - [OS and Docker](#os-and-docker) - - [Hardware](#hardware) - - [Networking](#networking) - - [Architecture](#architecture) - - [Containers](#containers) + - [OS and Docker](#os-and-docker-requirements) + - [Nodes](#node-requirements) + - [Networking](#networking-requirements) + - [Architecture](#architecture-requirements) + - [Containers](#container-requirements) - [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) - [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) - + # Prerequisites @@ -38,26 +40,29 @@ Before provisioning a new cluster, be sure that you have already installed Ranch For a custom cluster, the general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). -### OS and Docker +### OS and Docker Requirements -In order to add Windows worker nodes to a cluster, the node must be running Windows Server 2019 (i.e. core version 1903 or above) and [Docker 19.03.]({{}}/rancher/v2.x/en/installation/requirements/) +In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): ->**Notes:** +- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. +- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. + +> **Notes:** > ->- If you are using AWS, Rancher recommends *Microsoft Windows Server 2019 Base with Containers* as the Amazon Machine Image (AMI). ->- If you are using GCE, Rancher recommends *Windows Server 2019 Datacenter for Containers* as the OS image. +> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). +> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. -### Hardware +### Node Requirements The hosts in the cluster need to have at least: - 2 core CPUs -- 4.5 GiB memory (~4.83 GB) -- 30 GiB of disk space (~32.21 GB) +- 5 GB memory +- 50 GB disk space Rancher will not provision the node if the node does not meet these requirements. -### Networking +### Networking Requirements Rancher only supports Windows using Flannel as the network provider. @@ -67,7 +72,7 @@ For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 ne For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. 
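As an illustration of the Flannel requirement, the sketch below shows how the network provider might be set in the cluster config file for a cluster with Windows support. The `host-gw` backend corresponds to the Host Gateway (L2bridge) option described above; treat the exact option names as assumptions to verify against the RKE and Rancher network plug-in documentation for your version.

```yaml
# Hypothetical excerpt from a cluster config with Windows support enabled
windows_prefered_cluster: true   # key spelling as generated in the example config elsewhere in these docs
rancher_kubernetes_engine_config:
  network:
    plugin: flannel
    options:
      flannel_backend_type: host-gw   # use "vxlan" for the VXLAN (Overlay) option
```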
-### Architecture +### Architecture Requirements The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. @@ -77,15 +82,15 @@ We recommend the minimum three-node architecture listed in the table below, but -Node | Operating System | Kubernetes Cluster Role(s) | Purpose ---------|------------------|----------------------------|-------- -Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd-nodes), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Manage the Kubernetes cluster -Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster -Node 3 | Windows (Windows Server 2019 required) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Run your Windows containers +| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | +| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd-nodes), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Manage the Kubernetes cluster | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | +| Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) | Run your Windows containers | -### Containers +### Container Requirements -Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server 2019 core version 1903. If you have existing containers built for an earlier Windows Server 2019 core version, they must be re-built on Windows Server 2019 core version 1903. +Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. # Tutorial: How to Create a Cluster with Windows Support @@ -96,11 +101,12 @@ When you provision a custom cluster with Rancher, you will add nodes to the clus To set up a custom cluster with support for Windows nodes and containers, you will need to complete the tasks below. + 1. [Provision Hosts](#1-provision-hosts) 1. [Create the Custom Cluster](#2-create-the-custom-cluster) 1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) 1. [Optional: Configuration for Azure Files](#5-optional-configuration-for-azure-files) - + # 1. 
Provision Hosts @@ -118,11 +124,11 @@ You will provision three nodes: - A second Linux node, which will be another worker node - The Windows node, which will run your Windows containers as a worker node -Node | Operating System ------|----------------- -Node 1 | Linux (Ubuntu Server 18.04 recommended) -Node 2 | Linux (Ubuntu Server 18.04 recommended) -Node 3 | Windows (Windows Server 2019 required) +| Node | Operating System | +| ------ | ------------------------------------------------------------ | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | +| Node 3 | Windows (Windows Server core version 1809 or above required) | If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) @@ -148,7 +154,7 @@ Windows support only be enabled if the cluster uses Kubernetes v1.15+ and the Fl 1. Click **Next**. -> **Important:** For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. +> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. # 3. Add Nodes to the Cluster @@ -178,8 +184,7 @@ It may take a few minutes for the node to be registered in your cluster. ### Add Linux Worker Node -After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support *Rancher cluster agent*, *Metrics server*, *DNS* and *Ingress* for your cluster. - +After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. 1. From the **Global** view, click **Clusters.** @@ -199,11 +204,11 @@ After the initial provisioning of your custom cluster, your cluster only has a s > **Note:** Taints on Linux Worker Nodes > ->For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the windows cluster will be automatically scheduled to the Windows worker node. 
If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. +> For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the windows cluster will be automatically scheduled to the Windows worker node. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. ->Taint Key | Taint Value | Taint Effect ->---|---|--- ->`cattle.io/os` | `linux` | `NoSchedule` +> | Taint Key | Taint Value | Taint Effect | +> | -------------- | ----------- | ------------ | +> | `cattle.io/os` | `linux` | `NoSchedule` | ### Add a Windows Worker Node @@ -231,11 +236,11 @@ If you are using Azure VMs for your nodes, you can use [Azure files](https://doc In order to have the Azure platform create the required storage resources, follow these steps: -1. [Configure the Azure cloud provider.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) +1. [Configure the Azure cloud provider.]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/#azure) -1. Configure `kubectl` to connect to your cluster. +1. Configure `kubectl` to connect to your cluster. -1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: +1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: --- apiVersion: rbac.authorization.k8s.io/v1 @@ -260,7 +265,7 @@ In order to have the Azure platform create the required storage resources, follo name: persistent-volume-binder namespace: kube-system -1. Create these in your cluster using one of the follow command. +1. Create these in your cluster using one of the follow command. ``` # kubectl create -f diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md index be87786d30e..e9986f6abae 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md @@ -23,8 +23,8 @@ For a summary of Kubernetes features supported in Windows, see [Using Windows in ## OS and Container Requirements -- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1803. -- You must build containers on Windows Server 1803 to run these containers on Windows Server 1803. +- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1809 or above. +- You must build containers on a Windows Server core version 1809 or above to run these containers on the same server version. 
## Objectives for Creating Cluster with Windows Support @@ -55,7 +55,7 @@ Node | Operating System | Future Cluster Role(s) --------|------------------|------ Node 1 | Linux (Ubuntu Server 16.04 recommended) | [Control Plane]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#control-plane-nodes), [etcd]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#etcd), [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) Node 2 | Linux (Ubuntu Server 16.04 recommended) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) (This node is used for Ingress support) -Node 3 | Windows (*Windows Server 1803 required*) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) +Node 3 | Windows (Windows Server core version 1809 or above) | [Worker]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/#worker-nodes) ### Requirements @@ -103,8 +103,6 @@ Option | Setting Node Operating System | Linux Node Roles | etcd
Control Plane
Worker -![Recommended Linux Control Plane Configuration]({{< baseurl >}}/img/rancher/linux-control-plane.png) - When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#create-the-custom-cluster) from [step 8]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). diff --git a/content/rancher/v2.x/en/contributing/_index.md b/content/rancher/v2.x/en/contributing/_index.md index 772431348c0..375a42b995e 100644 --- a/content/rancher/v2.x/en/contributing/_index.md +++ b/content/rancher/v2.x/en/contributing/_index.md @@ -43,12 +43,10 @@ To see all libraries/projects used in Rancher, see the `vendor.conf` in the `ran # Building -Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository (plus additional `trash` commands, please see below for more information about using `trash`), and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. +Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. -Dependencies on other libraries/projects are managed using [Trash](https://github.com/rancher/trash). See the [Trash README](https://github.com/rancher/trash/blob/master/README.md) to discover how it can be used. In short, it uses a `vendor.conf` file to specify the source repository and revision to fetch, checkout and copy to the `./vendor` directory. After updating `vendor.conf`, you can run `make trash` to update dependencies for your change. When the dependencies are updated, you can build the project again using `make` so that it will be built using the updated dependencies. - # Bugs, Issues or Questions If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. diff --git a/content/rancher/v2.x/en/faq/_index.md b/content/rancher/v2.x/en/faq/_index.md index 7e72c60a334..60f260984b5 100644 --- a/content/rancher/v2.x/en/faq/_index.md +++ b/content/rancher/v2.x/en/faq/_index.md @@ -7,121 +7,66 @@ aliases: This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. -See [Technical FAQ]({{< baseurl >}}/rancher/v2.x/en/faq/technical/), for frequently asked technical questions. +See [Technical FAQ]({{}}/rancher/v2.x/en/faq/technical/), for frequently asked technical questions. -### Kubernetes +
-#### What does it mean when you say Rancher v2.x is built on Kubernetes? - -Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. - -#### Do you plan to implement upstream Kubernetes, or continue to work on your own fork? - -We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. - -#### Does this release mean that we need to re-train our support staff in Kubernetes? - -Yes. Rancher will offer the native Kubernetes functionality via `kubectl` but will also offer our own UI dashboard to allow you to deploy Kubernetes workload without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend understanding Kubernetes. We do plan on improving our UX with subsequent releases to make Kubernetes easier to use. - -#### So, wait. Is a Rancher compose going to make a Kubernetes pod? Do we have to learn both now? We usually use the filesystem layer of files, not the UI. - -No. Unfortunately, the differences were enough such that we cannot support Rancher compose anymore in 2.x. We will be providing both a tool and guides to help with this migration. - -### Cattle - -### How does Rancher v2.x affect Cattle? - -Cattle will not supported in v2.x as Rancher has been re-architected to be based on Kubernetes. You can, however, expect majority of Cattle features you use will exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. - -#### Can I migrate existing Cattle workloads into Kubernetes? - -Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. - -### Environments & Clusters - -#### Can I still create templates for environments and clusters? - -Starting with 2.0, the concept of an environment has now been changed to a Kubernetes cluster as going forward, only the Kubernetes orchestration engine is supported. -Kubernetes RKE Templates is on our roadmap for 2.x. Please refer to our Release Notes and documentation for all the features that we currently support. - -#### Can you still add an existing host to an environment? (i.e. not provisioned directly from Rancher) - -Yes. We still provide you with the same way of executing our Rancher agents directly on hosts. - -### Upgrading/Migrating - -#### How would the migration from v1.x to v2.x work? - -Due to the technical difficulty in transforming a Docker container into a pod running Kubernetes, upgrading will require users to "replay" those workloads from v1.x into new v2.x environments. We plan to ship with a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. - -#### Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters? - -At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. 
Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. - -#### Can I import OpenShift Kubernetes clusters into v2.x? - -Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. - -### Support - -#### What about Rancher v1.6? Are you planning some long-term support releases? - -That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it for the next 12 months at a minimum. We will extend that time period, if necessary, depending on how quickly users move to v2.1. - -#### Does Rancher v2.x support Docker Swarm and Mesos as environment types? +**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. -#### Is it possible to manage Azure Kubernetes Services with Rancher v2.x? +
+ +**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** + Yes. -#### What about Windows support? +
-With [Rancher 2.3.0 Preview 1](https://forums.rancher.com/t/rancher-release-v2-3-0-alpha3-preview-of-windows-containers/14260), we have enabled the support for Windows Server 2019 containers. The technology is in preview mode but we intend to make it GA later this year. Please refer to our documentation and Release Notes to get the latest information on this feature. +**Does Rancher support Windows?** -#### Are you planning on supporting Istio in Rancher v2.x? +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) -[Rancher 2.3.0 Preview 2](https://forums.rancher.com/t/rancher-release-v2-3-0-alpha5-preview-of-istio/14585/2) has support for Istio. Please refer to our documentation and Release Notes to get the latest information on this feature. -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along wtih any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). +
-#### Will Rancher v2.x support Hashicorp's Vault for storing secrets? +**Does Rancher support Istio?** + +As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.x/en/cluster-admin/tools/istio/) + +Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/) + +
+ +**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** Secrets management is on our roadmap but we haven't assigned it to a specific release yet. -#### Does Rancher v2.x support RKT containers as well? +
+ +**Does Rancher v2.x support RKT containers as well?** At this time, we only support Docker. -#### Will Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes? +
-We will provide the ability to use Calico, Canal, and Flannel, but always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) on what is officially supported. +**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes?** -#### Are you planning on supporting Traefik for existing setups? +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. + +
+ +**Are you planning on supporting Traefik for existing setups?** We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. -### General +
-#### Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x? +**Can I import OpenShift Kubernetes clusters into v2.x?** -Yes. We plan to eventually enhance this feature so you can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. +Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. -#### Are you going to integrate Longhorn? +
-Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project once v2.0 reaches GA (general availability). +**Are you going to integrate Longhorn?** -#### Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC? - -The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. - -#### Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x? - -Yes. You can do so by leveraging Kubernetes' network policies. - -#### What about the CLI? Will that work the same way with the same features? - -Yes. Definitely. - -#### If we use Kubernetes native YAML files for creating resources, should we expect that to work as expected, or do we need to use Rancher/Docker compose files to deploy infrastructure? - -Absolutely. +Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project. \ No newline at end of file diff --git a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md index eadea3ea782..5a2d100e8d8 100644 --- a/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md +++ b/content/rancher/v2.x/en/faq/networking/cni-providers/_index.md @@ -53,11 +53,11 @@ Canal is a CNI network provider that gives you the best of Flannel and Calico. I In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) -![Canal Diagram]({{< baseurl >}}/img/rancher/canal-diagram.png) +{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} -For more information, see the [Canal GitHub Page](https://github.com/projectcalico/canal). +For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) #### Flannel @@ -67,7 +67,7 @@ Flannel is a simple and easy way to configure L3 network fabric designed for Kub Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. 
![Flannel Diagram]({{< baseurl >}}/img/rancher/flannel-diagram.png) @@ -81,7 +81,7 @@ Calico enables networking and network policy in Kubernetes clusters across the c Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. -Kubernetes workers should open TCP port `179` (BGP). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. ![Calico Diagram]({{< baseurl >}}/img/rancher/calico-diagram.svg) @@ -99,7 +99,7 @@ _Available as of v2.2.0_ Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for more details. +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for more details. For more information, see the following pages: diff --git a/content/rancher/v2.x/en/faq/security/_index.md b/content/rancher/v2.x/en/faq/security/_index.md index 670cf73870b..733b79dbf05 100644 --- a/content/rancher/v2.x/en/faq/security/_index.md +++ b/content/rancher/v2.x/en/faq/security/_index.md @@ -4,10 +4,12 @@ weight: 8007 --- -### Is there a Hardening Guide? +**Is there a Hardening Guide?** The Hardening Guide is now located in the main [Security]({{< baseurl >}}/rancher/v2.x/en/security/) section. -### What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked? +
+ +**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{< baseurl >}}/rancher/v2.x/en/security/) section. diff --git a/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md b/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md new file mode 100644 index 00000000000..e0aa7ff6a3c --- /dev/null +++ b/content/rancher/v2.x/en/faq/upgrades-to-2x/_index.md @@ -0,0 +1,104 @@ +--- +title: Questions about Upgrading to Rancher v2.x +weight: 1 +--- + +This page contains frequently asked questions about the changes between Rancher v1.x and v2.x, and how to upgrade from Rancher v1.x to v2.x. + +# Kubernetes + +**What does it mean when you say Rancher v2.x is built on Kubernetes?** + +Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. + +
+ +**Do you plan to implement upstream Kubernetes, or continue to work on your own fork?** + +We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. + +
+ +**Does this release mean that we need to re-train our support staff in Kubernetes?** + +Yes. Rancher will offer the native Kubernetes functionality via `kubectl` but will also offer our own UI dashboard to allow you to deploy Kubernetes workload without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend understanding Kubernetes. We do plan on improving our UX with subsequent releases to make Kubernetes easier to use. + +
+ +**Is a Rancher compose going to make a Kubernetes pod? Do we have to learn both now? We usually use the filesystem layer of files, not the UI.** + +No. Unfortunately, the differences were enough such that we cannot support Rancher compose anymore in 2.x. We will be providing both a tool and guides to help with this migration. + +
+ +**If we use Kubernetes native YAML files for creating resources, should we expect that to work as expected, or do we need to use Rancher/Docker compose files to deploy infrastructure?** + +Absolutely. + +# Cattle + +**How does Rancher v2.x affect Cattle?** + +Cattle will not be supported in v2.x as Rancher has been re-architected to be based on Kubernetes. You can, however, expect that the majority of Cattle features you use will exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. + +<br/>
+ +**Can I migrate existing Cattle workloads into Kubernetes?** + +Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. + +# Feature Changes + +**Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x?** + +Yes. You can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. + +
+ +**Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC?** + +The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. + +
+ +**Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x?** + +Yes. You can do so by leveraging Kubernetes' network policies. + +
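As a rough illustration of that answer, the policy below only allows the front-end pods to reach the back-end pods. It is a sketch under assumed names: the `frontend`/`backend` labels, namespace, and port are hypothetical, and the policy is only enforced when the cluster's CNI network provider supports network policies (for example Canal or Calico):

```
# Hypothetical sketch: once this policy selects the "backend" pods, only pods
# labeled app=frontend in the same namespace may reach them, and only on TCP 8080.
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: backend-allow-frontend    # illustrative name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend
    ports:
    - protocol: TCP
      port: 8080
EOF
```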
+ +**What about the CLI? Will that work the same way with the same features?** + +Yes. Definitely. + +# Environments & Clusters + +**Can I still create templates for environments and clusters?** + +Starting with 2.0, the concept of an environment has now been changed to a Kubernetes cluster as going forward, only the Kubernetes orchestration engine is supported. + +Kubernetes RKE Templates is on our roadmap for 2.x. Please refer to our Release Notes and documentation for all the features that we currently support. + +
+ +**Can you still add an existing host to an environment? (i.e. not provisioned directly from Rancher)** + +Yes. We still provide you with the same way of executing our Rancher agents directly on hosts. + +# Upgrading/Migrating + +**How would the migration from v1.x to v2.x work?** + +Due to the technical difficulty in transforming a Docker container into a pod running Kubernetes, upgrading will require users to "replay" those workloads from v1.x into new v2.x environments. We plan to ship with a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. + +
+ +**Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters?** + +At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. + +# Support + +**Are you planning some long-term support releases for Rancher v1.6?** + +That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it. New releases of the v1.6 stream are announced in the [Rancher forums.](https://forums.rancher.com/c/announcements) The Rancher wiki contains the [v1.6 release notes.](https://github.com/rancher/rancher/wiki/Rancher-1.6) \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/_index.md b/content/rancher/v2.x/en/installation/_index.md index 55431c72e4f..6ca571667e1 100644 --- a/content/rancher/v2.x/en/installation/_index.md +++ b/content/rancher/v2.x/en/installation/_index.md @@ -4,6 +4,8 @@ weight: 50 --- This section contains instructions for installing Rancher in development and production environments. +Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.]({{}}/rancher/v2.x/en/installation/requirements/) + ### Installation Options - [Single Node Installation]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/) @@ -13,13 +15,3 @@ This section contains instructions for installing Rancher in development and pro - [High Availability Installation]({{< baseurl >}}/rancher/v2.x/en/installation/ha/) This install scenario creates a new Kubernetes cluster dedicated to running Rancher Server in a high-availability (HA) configuration, which runs Rancher Server on multiple hosts so that it's always accessible provided that one of your cluster nodes is running. We recommend high-availability installs in production environments, where your user base requires 24/7 access to your applications. - -### Reference - -- [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/) - - A reference of hardware and software requirements for the server(s) hosting Rancher. - -- [Port Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/references/) - - List of required ports you must open to operate Rancher. diff --git a/content/rancher/v2.x/en/installation/air-gap/_index.md b/content/rancher/v2.x/en/installation/air-gap/_index.md index 61628b4a872..e4b72412447 100644 --- a/content/rancher/v2.x/en/installation/air-gap/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap/_index.md @@ -24,7 +24,7 @@ The following CLI tools are required for the HA install. Make sure these tools a * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. * [rke]({{< baseurl >}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -* [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. +* [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher. 
{{% /tab %}} {{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/air-gap/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap/install-rancher/_index.md index f679affe276..b395b88fcc3 100644 --- a/content/rancher/v2.x/en/installation/air-gap/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap/install-rancher/_index.md @@ -28,7 +28,7 @@ This section describes installing Rancher in five parts: From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. -1. If you haven't already, initialize `helm` locally on a workstation that has internet access. +1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher. ```plain helm init -c @@ -213,7 +213,7 @@ If you are installing Rancher versions prior to v2.3.0, you will not be able to These resources could be helpful when installing Rancher: -- [Rancher Helm chart options]({{}}rancher/v2.x/en/installation/ha/helm-rancher/chart-options/) +- [Rancher Helm chart options]({{}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/) - [Adding TLS secrets]({{}}/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/) - [Troubleshooting Rancher HA installations]({{}}/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/) diff --git a/content/rancher/v2.x/en/installation/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.x/en/installation/air-gap/launch-kubernetes/_index.md index 42efda148ec..f675107a92c 100644 --- a/content/rancher/v2.x/en/installation/air-gap/launch-kubernetes/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap/launch-kubernetes/_index.md @@ -11,7 +11,7 @@ Rancher recommends installing Rancher in a Highly Available (HA) configuration. This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. -Since a HA installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{< baseurl >}}/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to create a RKE config file. +Since a HA installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{< baseurl >}}/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE]({{< baseurl >}}/rke/latest/en/installation/) and create a RKE config file. ### A. 
Create an RKE Config File diff --git a/content/rancher/v2.x/en/installation/air-gap/populate-private-registry/_index.md b/content/rancher/v2.x/en/installation/air-gap/populate-private-registry/_index.md index 196a9242cba..7e96a9e1e42 100644 --- a/content/rancher/v2.x/en/installation/air-gap/populate-private-registry/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap/populate-private-registry/_index.md @@ -5,6 +5,8 @@ aliases: - /rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/ - /rancher/v2.x/en/installation/air-gap-high-availability/prepare-private-registry/ - /rancher/v2.x/en/installation/air-gap-single-node/prepare-private-registry/ + - /rancher/v2.x/en/installation/air-gap-single-node/config-rancher-for-private-reg/ + - /rancher/v2.x/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ --- >**Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. @@ -123,7 +125,7 @@ D. Populate the private registry ### Prerequisites -These steps expect you to use a Windows 1903 Server workstation that has internet access, access to your private registry, and at least 50 GB of disk space. +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. @@ -259,7 +261,7 @@ The workstation must have Docker 18.02+ in order to support manifests, which are ### D. Populate the private registry -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-images.txt`, `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh` script to load the images. The `rancher-images.txt` / `rancher-windows-images.txt` image list is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. 1. Log into your private registry if required: diff --git a/content/rancher/v2.x/en/installation/ha/_index.md b/content/rancher/v2.x/en/installation/ha/_index.md index 63cef81af17..5c720e2373d 100644 --- a/content/rancher/v2.x/en/installation/ha/_index.md +++ b/content/rancher/v2.x/en/installation/ha/_index.md @@ -28,9 +28,7 @@ The following CLI tools are required for this install. Please make sure these to * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. * [rke]({{< baseurl >}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -* [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. - -> **Important:** Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. +* [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher.
## Installation Outline diff --git a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md index 88b58cdc056..c22d03a5739 100644 --- a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md +++ b/content/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/_index.md @@ -28,7 +28,7 @@ Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get st The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -![EC2 Load Balancing section]({{< baseurl >}}/img/rancher/ha/nlb/ec2-loadbalancing.png) +{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} Click **Create target group** to create the first target group, regarding TCP port 443. @@ -54,11 +54,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 443 settings**
-![Target group 443]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}}
**Screenshot Target group TCP port 443 Advanced settings**
-![Target group 443 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}}
@@ -86,11 +86,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 80 settings**
-![Target group 80]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}}
**Screenshot Target group TCP port 80 Advanced settings**
-![Target group 80 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}}
@@ -100,19 +100,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -![Edit target group 443]({{< baseurl >}}/img/rancher/ha/nlb/edit-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} Select the instances (Linux nodes) you want to add, and click **Add to registered**.
**Screenshot Add targets to target group TCP port 443**
-![Add targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/add-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}}
**Screenshot Added targets to target group TCP port 443**
-![Added targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/added-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} When the instances are added, click **Save** on the bottom right of the screen. diff --git a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md b/content/rancher/v2.x/en/installation/ha/helm-init/_index.md index 3dc99fdcd48..e0793246f7f 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-init/_index.md @@ -3,10 +3,13 @@ title: "3. Initialize Helm (Install Tiller)" weight: 195 --- - Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. -> **Note:** For systems without direct internet access see [Helm - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#helm) for install details. +For systems without direct internet access, see [Helm - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#helm) for install details. + +Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) ### Install Tiller on the Cluster diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md index 246f58ba88c..ecf5b847dac 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md @@ -5,7 +5,11 @@ weight: 200 Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher. -> **Note:** For systems without direct internet access see [Air Gap: High Availability Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). +For systems without direct internet access, see [Air Gap: High Availability Install]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/). + +Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. 
In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) ### Add the Helm Chart Repository diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md index 2d9d16bba1b..ae2bead7b0c 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md @@ -156,6 +156,17 @@ You may terminate the SSL/TLS on a L7 load balancer external to the Rancher clus Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. +#### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + #### Required Headers * `Host` diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md index 9ba02bc211a..41ce4337fa8 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md @@ -36,7 +36,7 @@ Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get st The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -![EC2 Load Balancing section]({{< baseurl >}}/img/rancher/ha/nlb/ec2-loadbalancing.png) +{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} Click **Create target group** to create the first target group, regarding TCP port 443. @@ -62,11 +62,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 443 settings**
-![Target group 443]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}}
**Screenshot Target group TCP port 443 Advanced settings**
-![Target group 443 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-443-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}}
@@ -94,11 +94,11 @@ Success codes | `200-399`
**Screenshot Target group TCP port 80 settings**
-![Target group 80]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}}
**Screenshot Target group TCP port 80 Advanced settings**
-![Target group 80 Advanced]({{< baseurl >}}/img/rancher/ha/nlb/create-targetgroup-80-advanced.png) +{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}}
@@ -108,19 +108,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -![Edit target group 443]({{< baseurl >}}/img/rancher/ha/nlb/edit-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} Select the instances (Linux nodes) you want to add, and click **Add to registered**.
**Screenshot Add targets to target group TCP port 443**
-![Add targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/add-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}}
**Screenshot Added targets to target group TCP port 443**
-![Added targets to target group 443]({{< baseurl >}}/img/rancher/ha/nlb/added-targets-targetgroup-443.png) +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} When the instances are added, click **Save** on the bottom right of the screen. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md index 543664669d9..f077118c92a 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md @@ -15,7 +15,7 @@ Below are steps that you can follow to determine what is wrong in your cluster. ### Double check if all the required ports are opened in your (host) firewall -Double check if all the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) are opened in your (host) firewall. +Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) are opened in your (host) firewall. ### All nodes should be present and in **Ready** state @@ -143,7 +143,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition => End ``` -If you see error in the output, that means that the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for overlay networking are not opened between the hosts indicated. +If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for overlay networking are not opened between the hosts indicated. Example error output of a situation where NODE1 had the UDP ports blocked. diff --git a/content/rancher/v2.x/en/installation/helm-version/_index.md b/content/rancher/v2.x/en/installation/helm-version/_index.md new file mode 100644 index 00000000000..2c07d4c4dee --- /dev/null +++ b/content/rancher/v2.x/en/installation/helm-version/_index.md @@ -0,0 +1,12 @@ +--- +title: Helm Version Requirements +weight: 400 +--- + +This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) + +- Helm v2.15.1 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. +- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. +- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. 
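Given these constraints, it can help to confirm which Helm 2 client is installed on your workstation before installing Rancher. Assuming the `helm` binary is already on your `PATH`, a quick check looks like this:

```
# Print only the local Helm client version; with Helm 2 this does not contact
# Tiller, so it works before the server-side component is initialized.
helm version --client --short
```

If the reported version is one of the releases flagged above (v2.12.0 or v2.15.0), switch to a different release before continuing.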
\ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/options/_index.md b/content/rancher/v2.x/en/installation/options/_index.md index 4ac3ff85801..1f2c461c7be 100644 --- a/content/rancher/v2.x/en/installation/options/_index.md +++ b/content/rancher/v2.x/en/installation/options/_index.md @@ -11,4 +11,4 @@ When installing Rancher, there are several advanced options that can be enabled | [API Audit Log]({{}}/rancher/v2.x/en/installation/options/api-audit-log/) | v2.0.0 | | [TLS Settings]({{}}/rancher/v2.x/en/installation/options/tls-settings/) | v2.1.7 | | [etcd configuration]({{}}/rancher/v2.x/en/installation/options/etcd/) | v2.2.0 | -| [Local System Charts for Air Gap Installations]({{}})/rancher/v2.x/en/installation/options/local-system-charts | v2.3.0 | +| [Local System Charts for Air Gap Installations]({{}}/rancher/v2.x/en/installation/options/local-system-charts) | v2.3.0 | diff --git a/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md b/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md index 147153aea90..e2a2d02dc94 100644 --- a/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md +++ b/content/rancher/v2.x/en/installation/options/api-audit-log/_index.md @@ -67,7 +67,7 @@ kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log ![Local Cluster: System Project]({{< baseurl >}}/img/rancher/audit_logs_gui/context_local_system.png) -1. From the **Workloads** tab, find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. +1. From the main navigation bar, choose **Resources > Workloads.** (In versions prior to v2.3.0, choose **Workloads** on the main navigation bar.) Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. ![Rancher Workload]({{< baseurl >}}/img/rancher/audit_logs_gui/rancher_workload.png) diff --git a/content/rancher/v2.x/en/installation/options/firewall/_index.md b/content/rancher/v2.x/en/installation/options/firewall/_index.md new file mode 100644 index 00000000000..601d8c046ee --- /dev/null +++ b/content/rancher/v2.x/en/installation/options/firewall/_index.md @@ -0,0 +1,106 @@ +--- +title: Opening Ports with firewalld +weight: 12000 +--- + +Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. + +For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: + +``` +Chain INPUT (policy ACCEPT) +target prot opt source destination +ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +ACCEPT icmp -- anywhere anywhere +ACCEPT all -- anywhere anywhere +ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain FORWARD (policy ACCEPT) +target prot opt source destination +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +``` + +You can check the default firewall rules with this command: + +``` +sudo iptables --list +``` + +This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.x/en/installation/references) for nodes in a high-availability Rancher server cluster. 
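Before changing any rules, it may be worth checking whether `firewalld` is running on the node at all and what it currently allows. A minimal check, assuming you have `sudo` access on the node, is:

```
# Reports "running" or "not running" for the firewalld daemon.
sudo firewall-cmd --state

# Shows the active zone configuration, including the ports and services
# that are currently open.
sudo firewall-cmd --list-all
```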
+ +# Prerequisite + +Install v7.x or later of `firewalld`: + +``` +yum install firewalld +systemctl start firewalld +systemctl enable firewalld +``` + +# Applying Firewall Port Rules + +In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. If your Rancher server nodes have all three roles, run the following commands on each node: + +``` +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` +If your Rancher server nodes have separate roles, use the following commands based on the role of the node: + +``` +# For etcd nodes, run the following commands: +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp + +# For control plane nodes, run the following commands: +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp + +# For worker nodes, run the following commands: +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` + +After the `firewall-cmd` commands have been run on a node, use the following command to enable the firewall rules: + +``` +firewall-cmd --reload +``` + +**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md b/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md index 6990ea56e7c..89d32c01671 100644 --- a/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md +++ b/content/rancher/v2.x/en/installation/options/local-system-charts/_index.md @@ -52,11 +52,11 @@ In the catalog management page in the Rancher UI, follow these steps: 1. Open `https:///v3/catalogs/system-library` in your browser. - ![Open]({{< baseurl >}}/img/rancher/airgap/system-charts-setting.png) + {{< img "/img/rancher/airgap/system-charts-setting.png" "Open">}} 1.
Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - ![Update]({{< baseurl >}}/img/rancher/airgap/system-charts-update.png) + {{< img "/img/rancher/airgap/system-charts-update.png" "Update">}} 1. Click **Show Request** @@ -65,4 +65,4 @@ In the catalog management page in the Rancher UI, follow these steps: **Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. {{% /tab %}} -{{% /tabs %}} \ No newline at end of file +{{% /tabs %}} diff --git a/content/rancher/v2.x/en/installation/references/_index.md b/content/rancher/v2.x/en/installation/references/_index.md index 7e100be0eac..5c79484b9e1 100644 --- a/content/rancher/v2.x/en/installation/references/_index.md +++ b/content/rancher/v2.x/en/installation/references/_index.md @@ -1,8 +1,6 @@ --- title: Port Requirements weight: 300 -aliases: - - /rancher/v2.x/en/hosts/amazon/#required-ports-for-rancher-to-work/ --- To operate properly, Rancher requires a number of ports to be open on Rancher nodes and Kubernetes cluster nodes. @@ -13,7 +11,7 @@ The following table lists the ports that need to be open to and from nodes that {{< ports-rancher-nodes >}} -**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{< baseurl >}}rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). +**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). ## Kubernetes Cluster Nodes diff --git a/content/rancher/v2.x/en/installation/requirements/_index.md b/content/rancher/v2.x/en/installation/requirements/_index.md index 01a6c066295..722a465a8f3 100644 --- a/content/rancher/v2.x/en/installation/requirements/_index.md +++ b/content/rancher/v2.x/en/installation/requirements/_index.md @@ -1,94 +1,140 @@ --- -title: Node Requirements +title: Installation Requirements weight: 1 aliases: + - /rancher/v2.x/en/hosts/amazon/#required-ports-for-rancher-to-work/ + - /rancher/v2.x/en/installation/references --- -Whether you're configuring Rancher to run in a single-node or high-availability setup, each node running Rancher Server must meet the following requirements. +This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. + +> It is important to note that if you install Rancher on a Kubernetes cluster, the hardware and networking requirements for the Rancher cluster are different than the [node requirements for user clusters,]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/) which will run your apps and services. 
+ +Make sure the node(s) for the Rancher server fulfill the following requirements: + +- [Operating Systems and Docker Requirements](#operating-systems-and-docker-requirements) +- [Hardware Requirements](#hardware-requirements) + - [CPU and Memory](#cpu-and-memory) + - [Disks](#disks) +- [Networking Requirements](#networking-requirements) + - [Node IP Addresses](#node-ip-addresses) + - [Port Requirements](#port-requirements) + +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.x/en/best-practices/deployment-types/) + +# Operating Systems and Docker Requirements -{{% tabs %}} -{{% tab "Operating Systems and Docker" %}} -
Rancher is tested on the following operating systems and their subsequent non-major releases with a supported version of [Docker](https://www.docker.com/). -* Ubuntu 16.04 (64-bit x86) - * Docker 17.03.x, 18.06.x, 18.09.x -* Ubuntu 18.04 (64-bit x86) - * Docker 18.06.x, 18.09.x -* Red Hat Enterprise Linux (RHEL)/CentOS 7.6 (64-bit x86) - * RHEL Docker 1.13 - * Docker 17.03.x, 18.06.x, 18.09.x -* RancherOS 1.5.1 (64-bit x86) - * Docker 17.03.x, 18.06.x, 18.09.x -* Windows Server 2019 (64-bit x86) - * Docker 19.03 - * Supported for worker nodes only. See [Configuring Custom Clusters for Windows]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.3.0/) -If you are using RancherOS, make sure you switch the Docker engine to a supported version using:
-``` -# Look up available versions -sudo ros engine list +Operating System | Tested Docker Versions +---------------------|-------------------------- +Ubuntu 16.04 (64-bit x86) | Docker 17.03.x, 18.06.x, 18.09.x, 19.03.x +Ubuntu 18.04 (64-bit x86) | Docker 18.06.x, 18.09.x, 19.03.x +Red Hat Enterprise Linux (RHEL)/CentOS 7.7 (64-bit x86) | RHEL Docker 1.13 +Oracle Linux 7 update 6* (64-bit x86) | Docker 17.03.x, 18.06.x, 18.09.x, 19.03.x +RancherOS 1.5.4 (64-bit x86) | Docker 17.03.x, 18.06.x, 18.09.x, 19.03.x +Windows Server 2019 (64-bit x86) | Requires Docker Engine - Enterprise Edition (EE).** -# Switch to a supported version -sudo ros engine switch docker-18.09.2 -``` -See [Running on ARM64 (Experimental)]({{< baseurl >}}/rancher/v2.x/en/installation/arm64-platform/) if you plan to run Rancher on ARM64. -
-
-[Docker Documentation: Installation Instructions](https://docs.docker.com/) -
-
-{{% /tab %}} -{{% tab "Hardware" %}} -
-Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. +\* Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. This [how-to guide]({{}}/rancher/v2.x/en/installation/options/firewall) shows how to check the default firewall rules and how to open the ports with `firewalld` if necessary. +\** Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. Supported for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/) -**[HA Node]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/) Requirements** +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental)]({{}}/rancher/v2.x/en/installation/arm64-platform/) -Deployment Size | Clusters | Nodes | vCPUs | RAM | ---- | --- | --- | --- | --- | -Small | Up to 5 | Up to 50 | 2 | 8 GB | -Medium | Up to 15 | Up to 200 | 4 | 16 GB | -Large | Up to 50 | Up to 500 | 8 | 32 GB | -X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | -XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | +For information on how to install Docker, refer to the offical [Docker documentation.](https://docs.docker.com/) -
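Before moving on to the hardware requirements, it can help to confirm that each prospective Rancher node is actually running one of the Docker versions listed in the table above. The following is a minimal sketch rather than an official install step: the `docker version` call works on any of the listed distributions, and the `ros engine` commands apply only to RancherOS (the engine version shown is an example; pick one that matches the table for your OS).

``` bash
# Print the Docker engine version installed on a prospective Rancher node
docker version --format '{{.Server.Version}}'

# On RancherOS only: list the available Docker engines and switch to a supported one
sudo ros engine list
sudo ros engine switch docker-18.09.2   # example version; use one listed for your OS
```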
+# Hardware Requirements -**[Single Node]({{< baseurl >}}/rancher/v2.x/en/installation/single-node/) Requirements** +This section describes the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. -Deployment Size | Clusters | Nodes | vCPUs | RAM | ---- | --- | --- | --- | --- | -Small | Up to 5 | Up to 50 | 1 | 4 GB | -Medium | Up to 15 | Up to 200 | 2 | 8 GB | +### CPU and Memory -
+Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher on a single node or on a high-availability (HA) cluster. -**Disks** +For production environments, the Rancher server should be installed on an HA cluster. -Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPs. In larger clusters consider using dedicated storage devices for etcd data and wal directories. +Rancher can also be installed on a single node in a development or testing environment. -
+{{% tabs %}} +{{% tab "HA Node Requirements" %}} + +These requirements apply to [HA installations]({{}}/rancher/v2.x/en/installation/ha/) of Rancher. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | --------- | ---------- | ----------------------------------------------- | ----------------------------------------------- | +| Small | Up to 5 | Up to 50 | 2 | 8 GB | +| Medium | Up to 15 | Up to 200 | 4 | 16 GB | +| Large | Up to 50 | Up to 500 | 8 | 32 GB | +| X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | +| XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | {{% /tab %}} -{{% tab "Networking" %}} -
+{{% tab "Single Node Requirements" %}} -### Node IP Address +These requirements apply to [single node]({{}}/rancher/v2.x/en/installation/single-node/) installations of Rancher. -Each node used (either for the Single Node Install, High Availability (HA) Install or nodes that are used in clusters) should have a static IP configured. In case of DHCP, the nodes should have a DHCP reservation to make sure the node gets the same IP allocated. +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 1 | 4 GB | +| Medium | Up to 15 | Up to 200 | 2 | 8 GB | + +{{% /tab %}} +{{% /tabs %}} + +### Disks + +Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. + +# Networking Requirements + +This section describes the networking requirements for the node(s) where the Rancher server is installed. + +### Node IP Addresses + +Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. ### Port Requirements -When deploying Rancher in an HA cluster, certain ports on your nodes must be open to allow communication with Rancher. The ports that must be open change according to the type of machines hosting your cluster nodes. For example, if your are deploying Rancher on nodes hosted by an infrastructure, port `22` must be open for SSH. The following diagram depicts the ports that are opened for each [cluster type]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning). +This section describes the port requirements for nodes running the `rancher/rancher` container. -
Cluster Type Port Requirements
+The port requirements are different depending on whether you are installing Rancher on a single node or on a high-availability Kubernetes cluster. For a single node, you only need to open the [ports required to enable Rancher to communicate with user clusters.](#port-requirements-for-enabling-rancher-to-communicate-with-user-clusters) For a high-availability installation, the same ports need to be opened, as well as additional [ports required to set up the Kubernetes cluster](#additional-port-requirements-for-nodes-in-high-availability-rancher-installations) that Rancher is installed on. -![Basic Port Requirements]({{< baseurl >}}/img/rancher/port-communications.svg) +### Port Requirements for Enabling Rancher to Communicate with User Clusters -{{< requirements_ports_rancher >}} -{{< requirements_ports_rke >}} -{{< ports_aws_securitygroup_nodedriver >}} -{{% /tab %}} -{{% /tabs %}} +For a single-node installation, you only need to open the ports for the Rancher management plane. These ports are opened to allow the Rancher server to communicate with the Kubernetes clusters that will run your apps and services. + +For a high-availability installation, these rules apply as well as the [port requirements to set up the Kubernetes cluster](#additional-port-requirements-for-nodes-in-high-availability-rancher-installations) that Rancher is installed on. + +The port requirements are different based the infrastructure you are using. For example, if you are deploying Rancher on nodes hosted by an infrastructure provider, port `22` must be open for SSH. The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.x/en/cluster-provisioning). + +
Port Requirements for the Rancher Management Plane
+ +![Basic Port Requirements]({{< baseurl >}}/img/rancher/port-communications.svg) + +The following tables break down the port requirements for inbound and outbound traffic: + +
Inbound Rules for Rancher Nodes
+ +| Protocol | Port | Source | Description | +|------------|-------|---------|----------------| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| TCP | 443 | etcd nodes, controlplane nodes, worker nodes, hosted/imported Kubernetes, and any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl | +
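As a quick way to validate the inbound rules above, you can probe the Rancher node from a machine that should be able to reach the UI/API, and open the ports with `firewalld` on RHEL/CentOS-derived hosts. This is a hedged sketch: `rancher.example.com` is a placeholder hostname, and the `firewall-cmd` steps only apply if your distribution uses `firewalld`.

``` bash
# From a client that should reach the Rancher UI/API, confirm the node answers on 80 and 443
nc -zv rancher.example.com 80
nc -zv rancher.example.com 443

# On a RHEL/CentOS Rancher node running firewalld, open the same inbound ports
sudo firewall-cmd --permanent --add-port=80/tcp
sudo firewall-cmd --permanent --add-port=443/tcp
sudo firewall-cmd --reload
```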
Outbound Rules for Rancher Nodes
+ +| Protocol | Port | Source | Description | +|------------|-------|---------|----------------| +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | `35.160.43.145/32`, `35.167.242.46/32`, `52.33.59.17/32` | git.rancher.io (catalogs) | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +**Note** Rancher nodes may also require additional outbound access for any external [authentication provider]({{< baseurl >}}rancher/v2.x/en/admin-settings/authentication/) which is configured (LDAP for example). + +### Additional Port Requirements for Nodes in High-Availability Rancher Installations + +You will need to open additional ports to the launch the Kubernetes cluster that is required for a high-availability installation of Rancher. + +The ports that need to be opened for each node depend on the node's Kubernetes role: etcd, controlplane, or worker. For a breakdown of the port requirements for each role, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) \ No newline at end of file diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md index 2b5a412831a..0bce71fe02f 100644 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ b/content/rancher/v2.x/en/installation/server-tags/_index.md @@ -1,9 +1,15 @@ --- -title: Choosing a Version +title: Choosing a Rancher Version weight: 230 --- -## Single Node Installs +This section describes how to choose a Rancher version. + +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a Helm chart on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher. + +For single node installations of Rancher, which is used for development and testing, you will install Rancher as a Docker image. + +# Single Node Installs When performing [single-node installs]({{< baseurl >}}/rancher/v2.x/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. @@ -17,9 +23,6 @@ Tag | Description `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. - - - >**Notes:** > >- The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. @@ -27,11 +30,12 @@ Tag | Description > > _Caveat:_ Alpha releases cannot be upgraded to or from any other release. - -## High Availability Installs +# High Availability Installs When installing, upgrading, or rolling back Rancher Server in a [high availability configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. 
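As an illustration of that step, the snippet below adds the `rancher-latest` and `rancher-stable` repositories described in this section and refreshes the local chart cache. It is a sketch rather than a full install procedure; confirm the repository URLs and the Helm version requirements linked below before relying on it.

``` bash
# Add the Helm chart repository that matches the release channel you want
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable   # recommended for production

# Refresh the local cache so the Rancher charts are visible
helm repo update
```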
+Refer to the [Helm version requirements]({{}}/rancher/v2.x/en/installation/helm-version) to choose a version of Helm to install Rancher. + ### Helm Chart Repositories Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a single node installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md index 4831c0d9b86..faf95b4b847 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/configmaps/_index.md @@ -34,7 +34,7 @@ ConfigMaps store general configuration information for an application, such as c > >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. > - > ![Bulk Key Value Pair Copy/Paste]({{< baseurl >}}/img/rancher/bulk-key-values.gif) + > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} **Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md index ea95ace61d8..55580b4bb16 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md @@ -18,8 +18,8 @@ The way that you manage HPAs is different based on your version of the Kubernete HPAs are also managed differently based on your version of Rancher: -- **For Rancher Prior to v2.3.0-alpha5:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). -- **For Rancher v2.3.0-alpha5+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. 
For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher Prior to v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). You might have additional HPA installation steps if you are using an older version of Rancher: @@ -28,7 +28,7 @@ You might have additional HPA installation steps if you are using an older versi ## Testing HPAs with a Service Deployment -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project's **HPA** tab. For more information, refer to [Get HPA Metrics and Status]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). +In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] ({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). \ No newline at end of file diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md index de2393a8370..d0c7e6cdb9e 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md @@ -3,7 +3,7 @@ title: Managing HPAs with the Rancher UI weight: 3028 --- -_Available as of v2.3.0-alpha5_ +_Available as of v2.3.0_ The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. @@ -13,7 +13,7 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. From the **Global** view, open the project that you want to deploy a HPA to. -1. Select **Workloads** in the navigation bar and then select the **HPA** tab. +1. Click **Resources > HPA.** 1. Click **Add HPA.** @@ -29,13 +29,13 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. Click **Create** to create the HPA. -> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Workloads > HPA view. +> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. ## Get HPA Metrics and Status 1. From the **Global** view, open the project with the HPAs you want to look at. -1. Select **Workloads** in the navigation bar and then select the **HPA** tab. The **HPA** tab shows the number of current replicas. +1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. 1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. 
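If you prefer to cross-check the UI with `kubectl`, or you are on a version where the UI does not manage HPAs, the commands below show the equivalent read and create operations. This is a sketch: `mynamespace` and `hello-world` are placeholder names.

``` bash
# List HPAs in a namespace, including current and target utilization and replica counts
kubectl -n mynamespace get hpa

# Show detailed status, metrics, and scaling events for a single HPA
kubectl -n mynamespace describe hpa hello-world

# Create a simple CPU-based HPA imperatively against an existing deployment
kubectl -n mynamespace autoscale deployment hello-world --cpu-percent=80 --min=1 --max=5
```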
@@ -44,7 +44,7 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. From the **Global** view, open the project that you want to delete an HPA from. -1. Select **Workloads** in the navigation bar and then select the **HPA** tab. +1. Click **Resources > HPA.** 1. Find the HPA which you would like to delete. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md index c2821d5c62a..ef8e7c8d762 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/_index.md @@ -3,13 +3,13 @@ title: Load Balancing and Ingresses weight: 3040 --- -Within Rancher, you can setup load balancers and ingress controllers to redirect service requests. +Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. ## Load Balancers After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. +If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. Rancher supports two types of load balancers: @@ -33,18 +33,28 @@ Load Balancers have a couple of limitations you should be aware of: ## Ingress -As mentioned in the limitations above, using a load balancer per service can be expensive. You can get around this issue using an ingress. +As mentioned in the limitations above, the disadvantages of using a load balancer are: -Ingress is a set or rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. +- Load Balancers can only handle one IP address per service. +- If you run multiple services in your cluster, you must have a load balancer for each service. +- It can be expensive to have a load balancer for every service. -Your load balancer can either reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launcher clusters are powered by [Nginx](https://www.nginx.com/). +In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. + +Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. + +Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. 
When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. + +Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. + +Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launcher clusters are powered by [Nginx](https://www.nginx.com/). Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. >**Using Rancher in a High Availability Configuration?** > ->Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global load balancer for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. +>Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. -- For more information on how to setup ingress in Rancher, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). +- For more information on how to set up ingress in Rancher, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress). - For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) - When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS]({{< baseurl >}}/rancher/v2.x/en/catalog/globaldns/). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md index efa6b5d13f1..ce3ced278ed 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md @@ -9,7 +9,7 @@ Ingress can be added for workloads to provide load balancing, SSL termination an 1. From the **Global** view, open the project that you want to add ingress to. -1. Select the **Load Balancing** tab. Then click **Add Ingress**. +1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions prior to v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. 1. Enter a **Name** for the ingress. 
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md index 77afd316aa7..c00f76846db 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md @@ -8,7 +8,11 @@ Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer ## Layer-4 Load Balancer -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. Layer-4 load balancer is supported by the underlying cloud provider. As a result, when you deploy RKE clusters on bare metal servers and vSphere clusters, layer-4 load balancer is not supported. +Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. + +Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. + +> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. ### Support for Layer-4 Load Balancing @@ -16,13 +20,16 @@ Support for layer-4 load balancer varies based on the underlying cloud provider. Cluster Deployment | Layer-4 Load Balancer Support ----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GCE cloud provider -Azure AKS | Supported by Azure cloud provider -RKE on EC2 | Supported by AWS cloud provider -RKE on DigitalOcean | Not Supported -RKE on vSphere | Not Supported -RKE on Custom Hosts
(e.g. bare-metal servers) | Not Supported +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GCE cloud provider +Azure AKS | Supported by Azure cloud provider +RKE on EC2 | Supported by AWS cloud provider +RKE on DigitalOcean | Limited NGINX or third-party Ingress* +RKE on vSphere | Limited NGINX or third party-Ingress* +RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* +Third-party MetalLB | Limited NGINX or third-party Ingress* + +\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) ## Layer-7 Load Balancer diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md index edf934e3ab4..0efcd6c9095 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/_index.md @@ -41,7 +41,7 @@ After the version control provider is authorized, you are automatically re-direc 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Click on **Configure Repositories**. @@ -59,7 +59,7 @@ Now that repositories are added to your project, you can start configuring the p 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the repository that you want to set up a pipeline for. Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Throughout the next couple of steps, we'll provide the options of how to do pipeline configuration through the UI or the YAML file. @@ -231,7 +231,7 @@ timeout: 30 ## Running your Pipelines -Run your pipeline for the first time. From the **Pipeline** tab, find your pipeline and select the vertical **Ellipsis (...) > Run**. +Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** (In versions prior to v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **Ellipsis (...) > Run**. During this initial run, your pipeline is tested, and the following [pipeline components]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines/#how-pipelines-work) are deployed to your project as workloads in a new namespace dedicated to the pipeline: @@ -257,7 +257,7 @@ Available Events: 1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. 1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the repository that you want to modify the event triggers. Select the vertical **Ellipsis (...) > Setting**. @@ -553,7 +553,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. 1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the repository for which you want to manage trigger rules, select the vertical **Ellipsis (...) > Edit Config**. @@ -571,7 +571,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. {{% tab "Stage Trigger" %}} 1. 
From the **Global** view, navigate to the project that you want to configure a stage trigger rule. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the repository for which you want to manage trigger rules, select the vertical **Ellipsis (...) > Edit Config**. @@ -596,7 +596,7 @@ Wildcard character (`*`) expansion is supported in `branch` conditions. {{% tab "Step Trigger" %}} 1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the repository for which you want to manage trigger rules, select the vertical **Ellipsis (...) > Edit Config**. @@ -654,7 +654,7 @@ When configuring a pipeline, certain [step types](#step-types) allow you to use {{% tab "By UI" %}} 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the pipeline for which you want to edit build triggers, select **Ellipsis (...) > Edit Config**. @@ -703,7 +703,7 @@ Create a secret in the same project as your pipeline, or explicitly in the names {{% tab "By UI" %}} 1. From the **Global** view, navigate to the project that you want to configure pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. From the pipeline for which you want to edit build triggers, select **Ellipsis (...) > Edit Config**. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md index 7c2136b481f..be9733f856a 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example-repos/_index.md @@ -11,7 +11,7 @@ Rancher ships with several example repositories that you can use to familiarize - Maven - php -> **Note**: The example repositories are only available if you have not [configured a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines). +> **Note:** The example repositories are only available if you have not [configured a version control provider]({{< baseurl >}}/rancher/v2.x/en/project-admin/tools/pipelines). ## Configure Repositories @@ -19,7 +19,7 @@ By default, the example pipeline repositories are disabled. Enable one (or more) 1. From the **Global** view, navigate to the project that you want to test out pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Click **Configure Repositories**. @@ -45,7 +45,7 @@ After enabling an example repository, review the pipeline to see how it is set u 1. From the **Global** view, navigate to the project that you want to test out pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. 
Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the example repository, select the vertical **Ellipsis (...)**. There are two ways to view the pipeline: * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. @@ -57,7 +57,7 @@ After enabling an example repository, run the pipeline to see how it works. 1. From the **Global** view, navigate to the project that you want to test out pipelines. -1. Select **Workloads** in the navigation bar and then select the **Pipelines** tab. +1. Click **Resources > Pipelines.** In versions prior to v2.3.0, click **Workloads > Pipelines.** 1. Find the example repository, select the vertical **Ellipsis (...) > Run**. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md index 2134780953a..0b756ed4de9 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/pipelines/example/_index.md @@ -7,7 +7,7 @@ aliases: Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. -In the [pipeline configuration docs](), we provide examples of each available feature within pipelines. Here is a full example for those who want to jump rigt in. +In the [pipeline configuration docs](), we provide examples of each available feature within pipelines. Here is a full example for those who want to jump right in. ```yaml # example diff --git a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md index 7a2b342191c..895c6d79102 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/registries/_index.md @@ -46,7 +46,7 @@ You can deploy a workload with an image from a private registry through the Ranc To deploy a workload with an image from your private registry, 1. Go to the project view, -1. Go to the **Workloads** tab. +1. Click **Resources > Workloads.** In versions prior to v2.3.0, go to the **Workloads** tab. 1. Click **Deploy.** 1. Enter a unique name for the workload and choose a namespace. 1. In the **Docker Image** field, enter the URL of the path to the Docker image in your private registry. For example, if your private registry is on Quay.io, you could use `quay.io//`. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md index 5b958d9dcb5..3c648e07f92 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/secrets/_index.md @@ -27,7 +27,7 @@ When creating a secret, you can make it available for any deployment within a pr >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. > - > ![Bulk Key Value Pair Copy/Paste]({{< baseurl >}}/img/rancher/bulk-key-values.gif) + > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} 1. Click **Save**. 
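For reference, the same kind of secret can be created from the command line with `kubectl`. This is a hedged equivalent of the UI steps above, with `mynamespace`, `my-secret`, and the key-value pairs as placeholders.

``` bash
# Create a generic secret with two key-value pairs in the target namespace
kubectl -n mynamespace create secret generic my-secret \
  --from-literal=username=admin \
  --from-literal=password='S3cr3t!'

# Confirm the secret exists (values are stored base64-encoded)
kubectl -n mynamespace get secret my-secret -o yaml
```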
diff --git a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md index 39308578377..6b0b289ef04 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/service-discovery/_index.md @@ -12,7 +12,7 @@ However, you also have the option of creating additional Service Discovery recor 1. From the **Global** view, open the project that you want to add a DNS record to. -1. Select the **Service Discovery** tab. Then click **Add Record**. +1. Click **Resources** in the main navigation bar. Click the **Service Discovery** tab. (In versions prior to v2.3.0, just click the **Service Discovery** tab.) Then click **Add Record**. 1. Enter a **Name** for the DNS record. This name is used for DNS resolution. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md index e5c6c4f503e..abe6b1c5fe7 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md @@ -8,7 +8,7 @@ A _sidecar_ is a container that extends or enhances the main container in a pod. 1. From the **Global** view, open the project running the workload you want to add a sidecar to. -1. Select the **Workloads** tab. +1. Click **Resources > Workloads.** In versions prior to v2.3.0, select the **Workloads** tab. 1. Find the workload that you want to extend. Select **Ellipsis icon (...) > Add a Sidecar**. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md index ab046ce7d06..0ae69d79bd4 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/_index.md @@ -9,13 +9,13 @@ Deploy a workload to run an application in one or more containers. 1. From the **Global** view, open the project that you want to deploy a workload to. -1. From the **Workloads** view, click **Deploy**. +1. 1. Click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) From the **Workloads** view, click **Deploy**. 1. Enter a **Name** for the workload. 1. Select a [workload type]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, by can change the workload type by clicking **More options.** -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. +1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. 1. 
Either select an existing [namespace]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/#namespaces), or click **Add to a new namespace** and enter a new namespace. diff --git a/content/rancher/v2.x/en/overview/architecture/_index.md b/content/rancher/v2.x/en/overview/architecture/_index.md index 63833ffcd4c..863c1c780d2 100644 --- a/content/rancher/v2.x/en/overview/architecture/_index.md +++ b/content/rancher/v2.x/en/overview/architecture/_index.md @@ -33,7 +33,7 @@ A Kubernetes cluster consists of multiple nodes. The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by Rancher Kubernetes Engine (RKE) and another created by Amazon EKS (Elastic Kubernetes Service). ![Architecture]({{< baseurl >}}/img/rancher/rancher-architecture.svg) diff --git a/content/rancher/v2.x/en/project-admin/istio/_index.md b/content/rancher/v2.x/en/project-admin/istio/_index.md index e2d66e00b7e..82ef83353cf 100644 --- a/content/rancher/v2.x/en/project-admin/istio/_index.md +++ b/content/rancher/v2.x/en/project-admin/istio/_index.md @@ -3,7 +3,7 @@ title: Istio weight: 3528 --- -_Available as of v2.3.0-alpha5_ +_Available as of v2.3.0_ Using Rancher, you can connect, secure, control, and observe services through integration with [Istio](https://istio.io/), a leading open-source service mesh solution. Istio provides behavioral insights and operational control over the service mesh as a whole, offering a complete solution to satisfy the diverse requirements of microservice applications. diff --git a/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md b/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md index d68b22c393b..fa2eb991c2c 100644 --- a/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/alerts/_index.md @@ -9,6 +9,13 @@ Alerts are sets of rules, chosen by you, to monitor for specific events. Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. +This section covers the following topics: + +- [Alerts scope](#alerts-scope) +- [Default project-level alerts](#default-project-level-alerts) +- [Adding project alerts](#adding-project-alerts) +- [Managing project alerts](#managing-project-alerts) + ## Alerts Scope The scope for alerts can be set at either the [cluster level]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/alerts/) or project level. @@ -20,6 +27,17 @@ At the project level, Rancher monitors specific deployments and sends alerts for * Pod status * The Prometheus expression cross the thresholds +## Default Project-level Alerts + +When you enable monitoring for the project, some project-level alerts are provided. 
+ +| Alert | Explanation | +|-------|-------------| +| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | +| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | + +For information on other default alerts, refer to the section on [cluster-level alerts.]({{}}/rancher/v2.x/en/cluster-admin/tools/alerts/default-alerts) + ## Adding Project Alerts >**Prerequisite:** Before you can receive project alerts, you must [add a notifier]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/tools/notifiers/#adding-notifiers). @@ -31,6 +49,7 @@ At the project level, Rancher monitors specific deployments and sends alerts for 1. Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. 1. Based on the type of alert you want to create, complete one of the instruction subsets below. + {{% accordion id="pod" label="Pod Alerts" %}} This alert type monitors for the status of a specific pod. @@ -146,14 +165,14 @@ If you enable [project monitoring]({{< baseurl >}}/rancher/v2.x/en/project-admin 1. Continue adding more **Alert Rule** to the group. -1. Finally, choose the [notifiers]({{< baseurl >}}/rancher/v2.x/en/cluster-admin/notifiers/) that send you alerts. +1. Finally, choose the [notifiers]({{< baseurl >}}//rancher/v2.x/en/cluster-admin/tools/notifiers/) that send you alerts. - You can set up multiple notifiers. - You can change notifier recipients on the fly. **Result:** Your alert is configured. A notification is sent when the alert is triggered. -#### Managing Project Alerts +## Managing Project Alerts To manage project alerts, browse to the project that alerts you want to manage. Then select **Tools > Alerts**. In versions prior to v2.2.0, you can choose **Resources > Alerts**. You can: @@ -161,4 +180,4 @@ To manage project alerts, browse to the project that alerts you want to manage. - Edit alert settings - Delete unnecessary alerts - Mute firing alerts -- Unmute muted alerts +- Unmute muted alerts \ No newline at end of file diff --git a/content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md b/content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md index 8b7cc985f05..4628689ee68 100644 --- a/content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/pipelines/_index.md @@ -7,10 +7,6 @@ aliases: - /rancher/v2.x/en/tools/pipelines/ - /rancher/v2.x/en/tools/pipelines/configurations/ --- ->**Notes:** -> ->- Pipelines are new and improved for Rancher v2.1! Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). A _pipeline_ is a software delivery process that is broken into different stages and steps. Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. 
@@ -30,6 +26,12 @@ Typically, pipeline stages include: Only [administrators]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{< baseurl >}}/rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/#project-roles) can [configure version control providers](#version-control-providers) and [manage global pipeline execution settings](#managing-global-pipeline-execution-settings). Project members can only configure [repositories]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#configuring-repositories) and [pipelines]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/pipelines/#pipeline-configuration). + +> **Notes:** +> +> - Pipelines were improved in Rancher v2.1. Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. +> - Still using v2.0.x? See the pipeline documentation for [previous versions]({{< baseurl >}}/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x). + ## Overview Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. @@ -254,7 +256,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin ### A. Configuring Persistent Data for Docker Registry -1. From the project that you're configuring a pipeline for, select the **Workloads** tab. +1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** In versions prior to v2.3.0, select the **Workloads** tab. 1. Find the `docker-registry` workload and select **Ellipsis (...) > Edit**. @@ -301,7 +303,7 @@ The internal [Docker registry](#how-pipelines-work) and the [Minio](#how-pipelin ### B. Configuring Persistent Data for Minio -1. From the **Workloads** tab, find the `minio` workload and select **Ellipsis (...) > Edit**. +1. From the project view, click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) Find the `minio` workload and select **Ellipsis (...) > Edit**. 1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: diff --git a/content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md index 0afe3b45e5f..7f78c61749e 100644 --- a/content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md +++ b/content/rancher/v2.x/en/project-admin/tools/pipelines/docs-for-v2.0.x/_index.md @@ -35,9 +35,7 @@ You can set up your pipeline to run a series of stages and steps to test your co 1. Go to the project you want this pipeline to run in. -2. Select workloads from the top level Nav bar - -3. Select pipelines from the secondary Nav bar +2. Click **Resources > Pipelines.** In versions prior to v2.3.0,click **Workloads > Pipelines.** 4. Click Add pipeline button. 
diff --git a/content/rancher/v2.x/en/quick-start-guide/_index.md b/content/rancher/v2.x/en/quick-start-guide/_index.md index 5cdb50a4b1a..e9b6640327a 100644 --- a/content/rancher/v2.x/en/quick-start-guide/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/_index.md @@ -11,4 +11,6 @@ We have Quick Start Guides for: - [Deploying Rancher Server]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. -- [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide//workload/): Deploy a simple workload and expose it, letting you access it from outside the cluster. +- [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/workload/): Deploy a simple workload and expose it, letting you access it from outside the cluster. + +- [Using the CLI]({{< baseurl >}}/rancher/v2.x/en/quick-start-guide/cli/): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. diff --git a/content/rancher/v2.x/en/quick-start-guide/cli/_index.md b/content/rancher/v2.x/en/quick-start-guide/cli/_index.md new file mode 100644 index 00000000000..5924bcc2d25 --- /dev/null +++ b/content/rancher/v2.x/en/quick-start-guide/cli/_index.md @@ -0,0 +1,22 @@ +--- +title: CLI with Rancher +weight: 100 +--- + +Interact with Rancher using command line interface (CLI) tools from your workstation. + +## Rancher CLI + +Follow the steps in [rancher cli](../cli). + +Ensure you can run `rancher kubectl get pods` successfully. + + +## kubectl +Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + + +Configure kubectl by visiting your cluster in the Rancher Web UI then clicking on `Kubeconfig`, copying contents and putting into your `~/.kube/config` file. + +Run `kubectl cluster-info` or `kubectl get pods` successfully. + diff --git a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md index 2d4a3a0b82a..931c0587ff5 100644 --- a/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md @@ -4,7 +4,7 @@ weight: 300 --- Howdy Partner! This tutorial walks you through: -- Installation of {{< product >}} {{< version >}} +- Installation of {{< product >}} 2.x - Creation of your first cluster - Deployment of an application, Nginx @@ -36,7 +36,7 @@ This Quick Start Guide is divided into different tasks for easier consumption. >**Note:** > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. > - > For a full list of port requirements, refer to [Single Node Installation]({{< baseurl >}}/rancher/v2.x/en/installation/references). + > For a full list of port requirements, refer to [Single Node Installation]({{}}/rancher/v2.x/en/installation/node-requirements/). Provision the host according to our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements/). 
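After the host is provisioned, this quick start typically launches Rancher as a single Docker container. A minimal sketch of that command is shown below, assuming ports 80 and 443 are open as described above; see the section on choosing a Rancher version for which image tag to use.

``` bash
# Start Rancher as a single Docker container on the provisioned host
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  rancher/rancher:stable   # pick a tag per the "Choosing a Rancher Version" section
```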
diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md index 6aaaf287912..ebf52672472 100644 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md @@ -19,7 +19,7 @@ For this workload, you'll be deploying the application Rancher Hello-World. 3. Open the **Project: Default** project. -4. From the main menu select **Workloads**, then click on the **Workloads** tab. +4. Click **Resources > Workloads.** In versions prior to v2.3.0, click **Workloads > Workloads.** 5. Click **Deploy**. @@ -49,7 +49,7 @@ Now that the application is up and running it needs to be exposed so that other 3. Open the **Default** project. -4. From the main menu select **Workloads**, then click on the **Load Balancing** tab. +4. Click **Resources > Workloads > Load Balancing.** In versions prior to v2.3.0, click the **Workloads** tab. Click on the **Load Balancing** tab. 5. Click **Add Ingress**. diff --git a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md index 2de08faf907..ace03022684 100644 --- a/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ b/content/rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md @@ -19,7 +19,7 @@ For this workload, you'll be deploying the application Rancher Hello-World. 3. Open the **Project: Default** project. -4. From the main menu select **Workloads**, then click on the **Workloads** tab. +4. Click **Resources > Workloads.** In versions prior to v2.3.0, click **Workloads > Workloads.** 5. Click **Deploy**. diff --git a/content/rancher/v2.x/en/security/_index.md b/content/rancher/v2.x/en/security/_index.md index ff568b4fa14..0264aa61c58 100644 --- a/content/rancher/v2.x/en/security/_index.md +++ b/content/rancher/v2.x/en/security/_index.md @@ -22,10 +22,11 @@ weight: 7505 ### Rancher Hardening Guide -The Rancher Hardening Guide is based off of controls and best practices found in the [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes/) from the Center for Internet Security. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. and Rancher v2.2.x. See Rancher's [Self Assessment of the CIS Kubernetes Benchmark](#cis-benchmark-rancher-self-assessment) for the full list of security controls. +The Rancher Hardening Guide is based off of controls and best practices found in the [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes/) from the Center for Internet Security. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x, v2.2.x and v.2.3.x. See Rancher's [Self Assessment of the CIS Kubernetes Benchmark](#cis-benchmark-rancher-self-assessment) for the full list of security controls. 
- [Hardening Guide for Rancher v2.1.x with Kubernetes 1.11]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.1/) - [Hardening Guide for Rancher v2.2.x with Kubernetes 1.13]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.2/) +- [Hardening Guide for Rancher v2.3.x with Kubernetes 1.15]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.3/) ### CIS Benchmark Rancher Self-Assessment @@ -36,6 +37,7 @@ Because Rancher and RKE install Kubernetes services as Docker containers, many o * [CIS Kubernetes Benchmark 1.3.0 - Rancher 2.1.x with Kubernetes 1.11]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.1/) * [CIS Kubernetes Benchmark 1.4.0 - Rancher 2.2.x with Kubernetes 1.13]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.2/#cis-kubernetes-benchmark-1-4-0-rancher-2-2-x-with-kubernetes-1-13/) * [CIS Kubernetes Benchmark 1.4.1 - Rancher 2.2.x with Kubernetes 1.13]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.2/#cis-kubernetes-benchmark-1-4-1-rancher-2-2-x-with-kubernetes-1-13) +* [CIS Kubernetes Benchmark 1.4.1 - Rancher 2.3.x with Kubernetes 1.15]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-x-with-kubernetes-1-15) ### Rancher CVEs and Resolutions diff --git a/content/rancher/v2.x/en/security/benchmark-2.1/_index.md b/content/rancher/v2.x/en/security/benchmark-2.1/_index.md index e2687edd5dd..caa0cb60459 100644 --- a/content/rancher/v2.x/en/security/benchmark-2.1/_index.md +++ b/content/rancher/v2.x/en/security/benchmark-2.1/_index.md @@ -1,6 +1,6 @@ --- title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x -weight: 104 +weight: 105 --- ### CIS Kubernetes Benchmark 1.3.0 - Rancher 2.1.x with Kubernetes 1.11 diff --git a/content/rancher/v2.x/en/security/benchmark-2.2/_index.md b/content/rancher/v2.x/en/security/benchmark-2.2/_index.md index d582b9cf5c0..bb8a7c4b59d 100644 --- a/content/rancher/v2.x/en/security/benchmark-2.2/_index.md +++ b/content/rancher/v2.x/en/security/benchmark-2.2/_index.md @@ -1,6 +1,6 @@ --- title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x -weight: 103 +weight: 104 --- ### CIS Kubernetes Benchmark 1.4.0 - Rancher 2.2.x with Kubernetes 1.13 diff --git a/content/rancher/v2.x/en/security/benchmark-2.3/_index.md b/content/rancher/v2.x/en/security/benchmark-2.3/_index.md new file mode 100644 index 00000000000..17e10af1103 --- /dev/null +++ b/content/rancher/v2.x/en/security/benchmark-2.3/_index.md @@ -0,0 +1,1791 @@ +--- +title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x +weight: 103 +--- + +### CIS Kubernetes Benchmark 1.4.1 - Rancher 2.3.x with Kubernetes 1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Benchmark_Assessment.pdf) + +#### Overview + +The following document scores a Kubernetes 1.15.x RKE cluster provisioned according to the Rancher v2.3.x hardening guide against the CIS 1.4.1 Kubernetes benchmark. + +> The CIS Benchmark version v1.4.1 covers the security posture of Kubernetes 1.13 clusters. This self-assessment has been run against Kubernetes 1.15, using the guidelines outlined in the CIS v1.4.1 benchmark. Updates to the CIS benchmarks will be applied to this document as they are released. + +This document is a companion to the Rancher v2.3.x security hardening guide. 
The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors, and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.4.1. You can download the benchmark after logging in to [CISecurity.org](https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Where the audit commands differ from those in the original CIS Benchmark, the commands specific to Rancher Labs are provided for testing. + +When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the `jq` command to provide human-readable formatting. + +#### Known Scored Control Failures + +The following scored controls do not currently pass, and Rancher Labs is working towards addressing these through future enhancements to the product. + +- 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) +- 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) + +### Controls + +--- + +## 1 - Master Node Security Configuration + +### 1.1 - API Server + +#### 1.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' +``` + +**Returned Value:** `--anonymous-auth=false` + +**Result:** Pass + +#### 1.1.2 - Ensure that the `--basic-auth-file` argument is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--basic-auth-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.3 - Ensure that the `--insecure-allow-any-token` argument is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-allow-any-token").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.4 - Ensure that the `--kubelet-https` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-https=false").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.5 - Ensure that the `--insecure-bind-address` argument is not set (Scored) + +**Notes** + +Flag not set or `--insecure-bind-address=127.0.0.1`. 
RKE sets this flag to `--insecure-bind-address=127.0.0.1` + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-bind-address=(?:(?!127\\.0\\.0\\.1).)+")' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.6 - Ensure that the `--insecure-port argument` is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--insecure-port=0").string' +``` + +**Returned Value:** `--insecure-port=0` + +**Result:** Pass + +#### 1.1.7 - Ensure that the `--secure-port` argument is not set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--secure-port=6443").string' +``` + +**Returned Value:** `--secure-port=6443` + +**Result:** Pass + +#### 1.1.8 - Ensure that the `--profiling` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to `false` (Scored) + +**Note:** This deprecated flag was removed in 1.14, so it cannot be set. + +**Result:** Pass + +#### 1.1.10 - Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysAdmit).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(AlwaysPullImages).*").captures[].string' +``` + +**Returned Value:** `AlwaysPullImages` + +**Result:** Pass + +#### 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(DenyEscalatingExec).*").captures[].string' +``` + +**Returned Value:** `DenyEscalatingExec` + +**Result:** Pass + +#### 1.1.13 - Ensure that the admission control plugin `SecurityContextDeny` is set (Not Scored) + +**Notes** + +This **SHOULD NOT** be set if you are using a `PodSecurityPolicy` (PSP). From the CIS Benchmark document: + +> This admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies + +Several system services (such as `nginx-ingress`) utilize `SecurityContext` to switch users and assign capabilities. These exceptions to the general principle of not allowing privilege or capabilities can be managed with PSP. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(SecurityContextDeny).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Document + +#### 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NamespaceLifecycle).*").captures[].string' +``` + +**Returned Value:** `NamespaceLifecycle` + +**Result:** Pass + +#### 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) + +**Notes** + +This path is the path inside of the container. 
It's combined with the RKE `cluster.yml` `extra-binds:` option to map the audit log to the host filesystem. + +Audit logs should be collected and shipped off-system to guarantee their integrity. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-path=/var/log/kube-audit/audit-log.json").string' +``` + +**Returned Value:** `--audit-log-path=/var/log/kube-audit/audit-log.json` + +**Result:** Pass + +#### 1.1.16 - Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxage=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxage=5` + +**Result:** Pass + +#### 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. Rancher Labs recommends setting this argument to a low value to prevent audit logs from filling the local disk. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxbackup=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxbackup=5` + +**Result:** Pass + +#### 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) + +**Notes** + +Audit logs should be collected and shipped off-system to guarantee their integrity. + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-log-maxsize=\\d+").string' +``` + +**Returned Value:** `--audit-log-maxsize=100` + +**Result:** Pass + +#### 1.1.19 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' +``` + +**Returned Value:** `--authorization-mode=Node,RBAC` + +**Result:** Pass + +#### 1.1.20 - Ensure that the `--token-auth-file` parameter is not set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--token-auth-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.21 - Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) + +**Notes** + +RKE uses the kubelet's ability to automatically create self-signed certs. No CA cert is saved to verify the communication between `kube-apiserver` and `kubelet`. + +**Mitigation** + +Make sure nodes with `role:controlplane` are on the same local network as your nodes with `role:worker`. Use network ACLs to restrict connections to the kubelet port (10250/tcp) on worker nodes, only permitting it from controlplane nodes. 
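As an illustration of the mitigation above, the following sketch restricts the kubelet port on a worker node to traffic from the controlplane nodes; the `10.0.1.0/24` subnet is an assumption and must be replaced with the network your controlplane nodes actually use:

``` bash
# Run on each node with role:worker (example subnet only)
iptables -A INPUT -p tcp --dport 10250 -s 10.0.1.0/24 -j ACCEPT
iptables -A INPUT -p tcp --dport 10250 -j DROP
```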
+ +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-certificate-authority=.*").string' +``` + +**Returned Value:** none + +**Result:** Fail (See Mitigation) + +#### 1.1.22 - Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) + +**Audit** (`--kubelet-client-certificate`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-certificate=.*").string' +``` + +**Returned Value:** `--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem` + +**Audit** (`--kubelet-client-key`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--kubelet-client-key=.*").string' +``` + +**Returned Value:** `--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem` + +**Result:** Pass + +#### 1.1.23 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-lookup=true").string' +``` + +**Returned Value:** `--service-account-lookup=true` + +**Result:** Pass + +#### 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(PodSecurityPolicy).*").captures[].string' +``` + +**Returned Value:** `PodSecurityPolicy` + +**Result:** Pass + +#### 1.1.25 - Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--service-account-key-file=.*").string' +``` + +**Returned Value:** `--service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` + +**Result:** Pass + +#### 1.1.26 - Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) + +**Audit** (`--etcd-certfile`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-certfile=.*").string' +``` + +**Returned Value:** `--etcd-certfile=/etc/kubernetes/ssl/kube-node.pem` + +**Audit** (`--etcd-keyfile`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-keyfile=.*").string' +``` + +**Returned Value:** `--etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem` + +**Result:** Pass + +#### 1.1.27 - Ensure that the admission control plugin `ServiceAccount` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(ServiceAccount).*").captures[].string' +``` + +**Returned Value:** `ServiceAccount` + +**Result:** Pass + +#### 1.1.28 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Audit** (`--tls-cert-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' +``` + +**Returned Value:** `--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem` + +**Audit** (`--tls-key-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' +``` + +**Returned Value:** `--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem` + +**Result:** Pass + +#### 1.1.29 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' +``` + +**Returned Value:** 
`--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` +**Result:** Pass + +#### 1.1.30 - Ensure that the API Server only makes use of strong cryptographic ciphers (Not Scored) + +**Audit** (Allowed Ciphers) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** (Disallowed Ciphers) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' +``` + +**Returned Value:** `null` + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(RC4).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.1.31 - Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--etcd-cafile=.*").string' +``` + +**Returned Value:** `--etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.1.32 - Ensure that the `--authorization-mode` argument includes Node (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=(Node|RBAC|,)+").string' +``` + +**Returned Value:** `--authorization-mode=Node,RBAC` +**Result:** Pass + +#### 1.1.33 - Ensure that the admission control plugin `NodeRestriction` is set (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(NodeRestriction).*").captures[].string' +``` + +**Returned Value:** `NodeRestriction` + +**Result:** Pass + +#### 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) + +**Notes** +In Kubernetes 1.15.x this flag is `--encryption-provider-config` + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | 
match("--encryption-provider-config=.*").string' +``` + +**Returned Value:** `encryption-provider-config=/opt/kubernetes/encryption.yaml` + +**Result:** Pass + +#### 1.1.35 - Ensure that the encryption provider is set to aescbc (Scored) + +**Notes** + +Only the first provider in the list is active. + +**Audit** + +``` bash +grep -A 1 providers: /opt/kubernetes/encryption.yaml | grep aescbc +``` + +**Returned Value:** `- aescbc:` + +**Result:** Pass + +#### 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Notes** + +The `EventRateLimit` plugin requires setting the `--admission-control-config-file` option and configuring details in the following files: + +- `/opt/kubernetes/admission.yaml` +- `/opt/kubernetes/event.yaml` + +See Host Configuration for details. + +**Audit** (Admissions plugin) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--enable-admission-plugins=.*(EventRateLimit).*").captures[].string' +``` + +**Returned Value:** `EventRateLimit` + +**Audit** (`--admission-control-config-file`) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--admission-control-config-file=.*").string' +``` + +**Returned Value:** `--admission-control-config-file=/opt/kubernetes/admission.yaml` + +**Result:** Pass + +#### 1.1.37 Ensure that the AdvancedAuditing argument is not set to false (Scored) + +**Notes** + +`AdvancedAuditing=false` should not be set, but `--audit-policy-file` should be set and configured. See Host Configuration for a sample audit policy file. + +**Audit** (Feature Gate) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--feature-gates=.*(AdvancedAuditing=false).*").captures[].string' +``` + +**Returned Value:** `null` + +**Audit** (Audit Policy File) + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--audit-policy-file=.*").string' +``` + +**Returned Value:** `--audit-policy-file=/opt/kubernetes/audit.yaml` + +**Result:** Pass + +#### 1.1.38 Ensure that the `--request-timeout` argument is set as appropriate (Scored) + +**Notes** + +RKE uses the default value of 60s and doesn't set this option. Tuning this value is specific to the environment. 
+ +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--request-timeout=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### Ensure that the --authorization-mode argument includes RBAC (Scored) + +**Audit** + +``` bash +docker inspect kube-apiserver | jq -e '.[0].Args[] | match("--authorization-mode=.*").string' +``` + +**Returned Value:** `"--authorization-mode=Node,RBAC"` + +**Result:** Pass + +### 1.2 - Scheduler + +#### 1.2.1 - Ensure that the `--profiling` argument is set to false (Scored) + +**Audit** + +``` bash +docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` +**Result:** Pass + +#### 1.2.2 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +``` bash +docker inspect kube-scheduler | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' +``` + +**Returned Value:** `--address=127.0.0.1` +**Result:** Pass + +### 1.3 - Controller Manager + +#### 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--terminated-pod-gc-threshold=\\d+").string' +``` + +**Returned Value:** `--terminated-pod-gc-threshold=1000` + +**Result:** Pass + +#### 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--profiling=false").string' +``` + +**Returned Value:** `--profiling=false` + +**Result:** Pass + +#### 1.3.3 - Ensure that the `--use-service-account-credentials` argument is set to true (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--use-service-account-credentials=true").string' +``` + +**Returned Value:** `--use-service-account-credentials=true` + +**Result:** Pass + +#### 1.3.4 - Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--service-account-private-key-file=.*").string' +``` + +**Returned Value:** `--service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem` + +**Result:** Pass + +#### 1.3.5 - Ensure that the `--root-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--root-ca-file=.*").string' +``` + +**Returned Value:** `--root-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 1.3.6 - Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) + +**Notes** + +RKE does not yet support certificate rotation. This feature is due for the 0.1.12 release of RKE. 
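For context, the feature gate checked by this control is typically supplied through RKE's `extra_args`; a hedged sketch of the relevant `cluster.yml` fragment might look like the following:

``` yaml
# Illustrative cluster.yml fragment: enable the feature gate on the controller manager
services:
  kube-controller:
    extra_args:
      feature-gates: "RotateKubeletServerCertificate=true"
```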
+ +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' +``` + +**Returned Value:** `RotateKubeletServerCertificate=true` + +**Result:** Pass + +#### 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +``` bash +docker inspect kube-controller-manager | jq -e '.[0].Args[] | match("--address=127\\.0\\.0\\.1").string' +``` + +**Returned Value:** `--address=127.0.0.1` + +**Result:** Pass + +### 1.4 - Configuration Files + +#### 1.4.1 - Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.2 - Ensure that the API server pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.3 - Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.4 - Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-controller-manager`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.5 - Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for `kube-scheduler`. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.6 - Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kube-scheduler. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.7 - Ensure that the `etcd` pod specification file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.8 - Ensure that the `etcd` pod specification file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 1.4.9 - Ensure that the Container Network Interface file permissions are set to `644` or more restrictive (Not Scored) + +**Notes** + +This is a manual check. + +**Audit** (`/var/lib/cni/networks/k8s-pod-network`) + +**Note** +This may return a lockfile. Permissions on this file do not need to be as restrictive as the CNI files. 
+ +``` bash +stat -c "%n - %a" /var/lib/cni/networks/k8s-pod-network/* +``` + +**Returned Value:** + +``` bash +/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - 644 +/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - 644 +/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - 644 +/var/lib/cni/networks/k8s-pod-network/lock - 750 +``` + +**Audit** (`/etc/cni/net.d`) + +``` bash +stat -c "%n - %a" /etc/cni/net.d/* +``` + +**Returned Value:** + +``` bash +/etc/cni/net.d/10-canal.conflist - 664 +/etc/cni/net.d/calico-kubeconfig - 600 +``` + +**Result:** Pass + +#### 1.4.10 - Ensure that the Container Network Interface file ownership is set to `root:root` (Not Scored) + +**Notes** + +This is a manual check. + +**Audit** (`/var/lib/cni/networks/k8s-pod-network`) + +``` bash +stat -c "%n - %U:%G" /var/lib/cni/networks/k8s-pod-network/* +``` + +**Returned Value:** + +``` bash +/var/lib/cni/networks/k8s-pod-network/10.42.0.2 - root:root +/var/lib/cni/networks/k8s-pod-network/10.42.0.3 - root:root +/var/lib/cni/networks/k8s-pod-network/last_reserved_ip.0 - root:root +/var/lib/cni/networks/k8s-pod-network/lock - root:root +``` + +**Audit** (`/etc/cni/net.d`) + +``` bash +stat -c "%n - %U:%G" /etc/cni/net.d/* +``` + +**Returned Value:** + +``` bash +/etc/cni/net.d/10-canal.conflist - root:root +/etc/cni/net.d/calico-kubeconfig - root:root +``` + +**Result:** Pass + +#### 1.4.11 - Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) + +**Notes** + +Files underneath the data dir have permissions set to `700` + +``` bash +stat -c "%n - %a" /var/lib/rancher/etcd/* + +/var/lib/etcd/member - 700 +``` + +**Audit** + +``` bash +stat -c %a /var/lib/rancher/etcd +``` + +**Returned Value:** `700` + +**Result:** Pass + +#### 1.4.12 - Ensure that the `etcd` data directory ownership is set to `etcd:etcd` (Scored) + +**Notes** + +The `etcd` container runs as the `etcd` user. The data directory and files are owned by `etcd`. + +**Audit** + +``` bash +stat -c %U:%G /var/lib/rancher/etcd +``` + +**Returned Value:** `etcd:etcd` + +**Result:** Pass + +#### 1.4.13 - Ensure that the file permissions for `admin.conf` are set to `644` or more restrictive (Scored) + +**Notes** + +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It's presented to user where RKE is run. We recommend that this kube_config_cluster.yml file be kept in secure store. + +**Result:** Pass (Not Applicable) + +#### 1.4.14 - Ensure that ownership of `admin.conf` is set to `root:root` (Scored) + +**Notes** + +RKE does not store the default `kubectl` config credentials file on the nodes. It presents credentials to the user when `rke` is first run, and only on the device where the user ran the command. Rancher Labs recommends that this `kube_config_cluster.yml` file be kept in secure store. 
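A simple way to act on that recommendation, assuming the file sits in the directory where `rke up` was run, is to lock down its permissions on the workstation:

``` bash
# Example only: restrict the cluster kubeconfig to the current user
chmod 600 kube_config_cluster.yml
```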
+ +**Result:** Pass (Not Applicable) + +#### 1.4.15 - Ensure that the file permissions for `scheduler.conf` are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 1.4.16 - Ensure that the file ownership of `scheduler.conf` is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 1.4.17 - Ensure that the file permissions for `controller-manager.conf` are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 1.4.18 - Ensure that the file ownership of `controller-manager.conf` is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 1.4.19 - Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Scored) + +**Audit** + +``` bash +ls -laR /etc/kubernetes/ssl/ |grep -v yaml + +``` + +**Returned Value:** +``` bash +total 128 +drwxr-xr-x 2 root root 4096 Jul 1 19:53 . +drwxr-xr-x 4 root root 4096 Jul 1 19:53 .. +-rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-key.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-apiserver-proxy-client-key.pem +-rw-r--r-- 1 root root 1107 Jul 1 19:53 kube-apiserver-proxy-client.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-apiserver-requestheader-ca-key.pem +-rw-r--r-- 1 root root 1082 Jul 1 19:53 kube-apiserver-requestheader-ca.pem +-rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-apiserver.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-ca-key.pem +-rw-r--r-- 1 root root 1017 Jul 1 19:53 kube-ca.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-controller-manager-key.pem +-rw-r--r-- 1 root root 1062 Jul 1 19:53 kube-controller-manager.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-16-161-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-16-161.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-etcd-172-31-24-134-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-24-134.pem +-rw------- 1 root root 1675 Jul 1 19:53 kube-etcd-172-31-30-57-key.pem +-rw-r--r-- 1 root root 1277 Jul 1 19:53 kube-etcd-172-31-30-57.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-node-key.pem +-rw-r--r-- 1 root root 1070 Jul 1 19:53 kube-node.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-proxy-key.pem +-rw-r--r-- 1 root root 1046 Jul 1 19:53 kube-proxy.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-scheduler-key.pem +-rw-r--r-- 1 root root 1050 Jul 1 19:53 kube-scheduler.pem +-rw------- 1 root root 1679 Jul 1 19:53 kube-service-account-token-key.pem +-rw-r--r-- 1 root root 1285 Jul 1 19:53 kube-service-account-token.pem +``` + +**Result:** Pass + +#### 1.4.20 - Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c "%n - %a" /etc/kubernetes/ssl/*.pem |grep -v key + +``` + +**Returned Value:** +``` bash +/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem - 644 +/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem - 644 +/etc/kubernetes/ssl/kube-apiserver.pem - 644 +/etc/kubernetes/ssl/kube-ca.pem - 644 +/etc/kubernetes/ssl/kube-controller-manager.pem - 644 
+/etc/kubernetes/ssl/kube-etcd-172-31-16-161.pem - 644 +/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem - 644 +/etc/kubernetes/ssl/kube-etcd-172-31-30-57.pem - 644 +/etc/kubernetes/ssl/kube-node.pem - 644 +/etc/kubernetes/ssl/kube-proxy.pem - 644 +/etc/kubernetes/ssl/kube-scheduler.pem - 644 +/etc/kubernetes/ssl/kube-service-account-token.pem - 644 +``` + +**Result:** Pass + +#### 1.4.21 - Ensure that the Kubernetes PKI key file permissions are set to 600 (Scored) + +**Audit** + +``` bash +stat -c "%n - %a" /etc/kubernetes/ssl/*key* + +``` + +**Returned Value:** +``` bash +/etc/kubernetes/ssl/kube-apiserver-key.pem - 600 +/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem - 600 +/etc/kubernetes/ssl/kube-apiserver-requestheader-ca-key.pem - 600 +/etc/kubernetes/ssl/kube-ca-key.pem - 600 +/etc/kubernetes/ssl/kube-controller-manager-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-16-161-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem - 600 +/etc/kubernetes/ssl/kube-etcd-172-31-30-57-key.pem - 600 +/etc/kubernetes/ssl/kube-node-key.pem - 600 +/etc/kubernetes/ssl/kube-proxy-key.pem - 600 +/etc/kubernetes/ssl/kube-scheduler-key.pem - 600 +/etc/kubernetes/ssl/kube-service-account-token-key.pem - 600 +``` + +**Result:** Pass + +### 1.5 - etcd + +#### 1.5.1 - Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) + +**Audit** `(--cert-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--cert-file=.*").string' +``` + +**Note** +Certificate file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134.pem` + +**Audit** (`--key-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--key-file=.*").string' +``` + +**Note** +Key file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--key-file=/etc/kubernetes/ssl/kube-etcd-172-31-24-134-key.pem` + +**Result:** Pass + +#### 1.5.2 - Ensure that the `--client-cert-auth` argument is set to `true` (Scored) + +**Notes** + +Setting "--client-cert-auth" is the equivalent of setting "--client-cert-auth=true". + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--client-cert-auth(=true)*").string' +``` + +**Returned Value:** `--client-cert-auth` + +**Result:** Pass + +#### 1.5.3 - Ensure that the `--auto-tls` argument is not set to `true` (Scored) + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--auto-tls(?:(?!=false).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.5.4 - Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) + +**Audit** (`--peer-cert-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-cert-file=.*").string' +``` + +**Note** +Certificate file name may vary slightly, since it contains the IP of the etcd container. + +**Returned Value:** `--peer-cert-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135.pem` + +**Audit** (`--peer-key-file`) + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-key-file=.*").string' +``` + +**Note** +Key file name may vary slightly, since it contains the IP of the etcd container. 
+ +**Returned Value:** `--peer-key-file=/etc/kubernetes/ssl/kube-etcd-172-31-22-135-key.pem` + +**Result:** Pass + +#### 1.5.5 - Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) + +**Notes** + +Setting `--peer-client-cert-auth` is the equivalent of setting `--peer-client-cert-auth=true`. + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-client-cert-auth(=true)*").string' +``` + +**Returned Value:** `--peer-client-cert-auth` + +**Result:** Pass + +#### 1.5.6 - Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--peer-auto-tls(?:(?!=false).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.5.7 - Ensure that a unique Certificate Authority is used for `etcd` (Not Scored) + +**Mitigation** + +RKE supports connecting to an external etcd cluster. This external cluster could be configured with its own discrete CA. + +**Notes** + +`--trusted-ca-file` is set and different from the `--client-ca-file` used by `kube-apiserver`. + +**Audit** + +``` bash +docker inspect etcd | jq -e '.[0].Args[] | match("--trusted-ca-file=(?:(?!/etc/kubernetes/ssl/kube-ca.pem).*)").string' +``` + +**Returned Value:** `null` + +**Result:** Pass (See Mitigation) + +#### 1.6 - General Security Primitives + +These "Not Scored" controls are implementation best practices. To ease the administrative burden, we recommend that you implement these best practices on your workload clusters by creating clusters with Rancher rather than using RKE alone. + +#### 1.6.1 - Ensure that the `cluster-admin` role is only used where required (Not Scored) + + +Rancher has built-in support for maintaining and enforcing Kubernetes RBAC on your workload clusters. + +Rancher's ability to integrate with external authentication sources (LDAP, SAML, AD, and others) allows your existing users or groups easy access with unique credentials. + +#### 1.6.2 - Create administrative boundaries between resources using namespaces (Not Scored) + +With Rancher, users or groups can be assigned access to all clusters, a single cluster or a "Project" (a group of one or more namespaces in a cluster). This allows granular access control to cluster resources. + +#### 1.6.3 - Create network segmentation using Network Policies (Not Scored) + +Rancher can (optionally) automatically create Network Policies to isolate "Projects" (a group of one or more namespaces) in a cluster. + +See "Cluster Options" when creating a cluster with Rancher to turn on Network Isolation. + +#### 1.6.4 - Ensure that the `seccomp` profile is set to `docker/default` in your pod definitions (Not Scored) + +Since this requires enabling the `AllAlpha` feature gate, we do not recommend enabling this feature at the moment. + +#### 1.6.5 - Apply security context to your pods and containers (Not Scored) + +This practice does go against control 1.1.13, but we prefer using a PodSecurityPolicy and allowing security contexts to be set, rather than a blanket deny. + +Rancher allows users to set various Security Context options when launching pods via the GUI. + +#### 1.6.6 - Configure image provenance using the `ImagePolicyWebhook` admission controller (Not Scored) + +The `ImagePolicyWebhook` admission controller requires a third-party service to enforce policy. This can be configured in the `--admission-control-config-file`. See the Host configuration section for the admission.yaml file. 
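For illustration, an `ImagePolicyWebhook` entry added to an admission configuration file might look like the following; the kubeconfig path and the backend webhook it points at are assumptions for this sketch, not part of the RKE defaults:

``` yaml
# Illustrative admission configuration fragment for ImagePolicyWebhook
apiVersion: apiserver.k8s.io/v1alpha1
kind: AdmissionConfiguration
plugins:
- name: ImagePolicyWebhook
  configuration:
    imagePolicy:
      kubeConfigFile: /opt/kubernetes/image-policy-kubeconfig.yaml
      allowTTL: 50
      denyTTL: 50
      retryBackoff: 500
      defaultAllow: false
```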
+ +#### 1.6.7 - Configure network policies as appropriate (Not Scored) + +Rancher can (optionally) automatically create Network Policies to isolate projects (a group of one or more namespaces) within a cluster. + +See the _Cluster Options_ section when creating a cluster with Rancher to turn on network isolation. + +#### 1.6.8 - Place compensating controls in the form of PodSecurityPolicy (PSP) and RBAC for privileged container usage (Not Scored) + +Section 1.7 of this guide shows how to add and configure a default "restricted" PSP based on controls. + +With Rancher you can create a centrally maintained "restricted" PSP and deploy it to all of the clusters that Rancher manages. + + +#### 1.7 - Pod Security Policies (PSP) + +This RKE configuration has two Pod Security Policies. + +- `default-psp`: assigned to namespaces that require additional privileged access: `kube-system`, `ingress-nginx` and `cattle-system`. +- `restricted`: This is the cluster default PSP and follows the best practices defined by controls in this section. + +#### 1.7.1 - Do not admit privileged containers (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.privileged}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.hostPID}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.hostIPC}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.4 - Do not admit containers wishing to share the host network namespace (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.hostNetwork}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.allowPrivilegeEscalation}' | grep "true" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.6 - Do not admit containers whose processes run as `root` (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. + +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.runAsUser.rule}' | grep "RunAsAny" +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Notes** + +The restricted PodSecurityPolicy is available to all ServiceAccounts. 
+ +**Audit** + +``` bash +kubectl get psp restricted -o jsonpath='{.spec.requiredDropCapabilities}' | grep "NET_RAW" +``` + +**Returned Value:** `[NET_RAW]` + +**Result:** Pass + +## 2 - Worker Node Security Configuration + +### 2.1 - Kubelet + +#### 2.1.1 - Ensure that the `--anonymous-auth` argument is set to `false` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--anonymous-auth=false").string' +``` + +**Returned Value:** `--anonymous-auth=false` + +**Result:** Pass + +#### 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--authorization-mode=Webhook").string' +``` + +**Returned Value:** `--authorization-mode=Webhook` + +**Result:** Pass + +#### 2.1.3 - Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--client-ca-file=.*").string' +``` + +**Returned Value:** `--client-ca-file=/etc/kubernetes/ssl/kube-ca.pem` + +**Result:** Pass + +#### 2.1.4 - Ensure that the `--read-only-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--read-only-port=0").string' +``` + +**Returned Value:** `--read-only-port=0` + +**Result:** Pass + +#### 2.1.5 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--streaming-connection-idle-timeout=.*").string' +``` + +**Returned Value:** `--streaming-connection-idle-timeout=1800s` + +**Result:** Pass + +#### 2.1.6 - Ensure that the `--protect-kernel-defaults` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--protect-kernel-defaults=true").string' +``` + +**Returned Value:** `--protect-kernel-defaults=true` + +**Result:** Pass + +#### 2.1.7 - Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--make-iptables-util-chains=true").string' +``` + +**Returned Value:** `--make-iptables-util-chains=true` + +**Result:** Pass + +#### 2.1.8 - Ensure that the `--hostname-override` argument is not set (Scored) + +**Notes** +This is used by most cloud providers. Not setting this is not practical in most cases. + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--hostname-override=.*").string' +``` + +**Returned Value:** `--hostname-override=` + +**Result:** Fail + +#### 2.1.9 - Ensure that the `--event-qps` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--event-qps=0").string' +``` + +**Returned Value:** `--event-qps=0` + +**Result:** Pass + +#### 2.1.10 - Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Notes** + +RKE does not set these options and uses the kubelet's self generated certificates for TLS communication. These files are located in the default directory (`/var/lib/kubelet/pki`). 
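If you want to confirm the presence of those self-generated certificates on a worker node, a quick check against the directory mentioned in the note above is:

``` bash
ls -l /var/lib/kubelet/pki/
```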
+ +**Audit** (`--tls-cert-file`) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cert-file=.*").string' +``` + +**Returned Value:** `null` + +**Audit** (`--tls-private-key-file`) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-private-key-file=.*").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 2.1.11 - Ensure that the `--cadvisor-port` argument is set to `0` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--cadvisor-port=0").string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +#### 2.1.12 - Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) + +**Notes** + +RKE handles certificate rotation through an external process. + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--rotate-certificates=true").string' +``` + +**Returned Value:** `null` + +**Result:** Pass (Not Applicable) + +#### 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--feature-gates=.*(RotateKubeletServerCertificate=true).*").captures[].string' +``` + +**Returned Value:** `RotateKubeletServerCertificate=true` + +**Result:** Pass + +#### 2.1.14 - Ensure that the kubelet only makes use of strong cryptographic ciphers (Not Scored) + +**Audit** (Allowed Ciphers) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_256_GCM_SHA384).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_256_GCM_SHA384` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(TLS_RSA_WITH_AES_128_GCM_SHA256).*").captures[].string' +``` + +**Returned Value:** `TLS_RSA_WITH_AES_128_GCM_SHA256` + +**Audit** (Disallowed Ciphers) + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | match("--tls-cipher-suites=.*(CBC).*").captures[].string' +``` + +**Returned Value:** `null` + +**Audit** + +``` bash +docker inspect kubelet | jq -e '.[0].Args[] | 
match("--tls-cipher-suites=.*(RC4).*").captures[].string' +``` + +**Returned Value:** `null` + +**Result:** Pass + +### 2.2 - Configuration Files + +#### 2.2.1 - Ensure that the permissions for `kubelet.conf` are set to `644` or more restrictive (Scored) + +**Notes** + +This is the value of the `--kubeconfig` option. + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 2.2.2 - Ensure that the kubelet.conf file ownership is set to root:root (Scored) + +**Notes** + +This is the value of the `--kubeconfig` option. + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.3 - Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + + +#### 2.2.4 - Ensure that the kubelet service file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 2.2.5 - Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 2.2.6 - Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.7 - Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) + +**Audit** + +``` bash +stat -c %a /etc/kubernetes/ssl/kube-ca.pem +``` + +**Returned Value:** `644` + +**Result:** Pass + +#### 2.2.8 - Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) + +**Audit** + +``` bash +stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem +``` + +**Returned Value:** `root:root` + +**Result:** Pass + +#### 2.2.9 - Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. + +**Result:** Pass (Not Applicable) + +#### 2.2.10 - Ensure that the kubelet configuration file permissions are set to `644` or more restrictive (Scored) + +**Notes** + +RKE doesn't require or maintain a configuration file for kubelet. All configuration is passed in as arguments at container run time. 
+ +**Result:** Pass (Not Applicable) diff --git a/content/rancher/v2.x/en/security/hardening-2.1/_index.md b/content/rancher/v2.x/en/security/hardening-2.1/_index.md index 07c9338593a..e525794f055 100644 --- a/content/rancher/v2.x/en/security/hardening-2.1/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.1/_index.md @@ -366,8 +366,8 @@ To pass the following controls in the CIS benchmark, ensure the appropriate flag Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - `--streaming-connection-idle-timeout=` -- `--protect-kernel-defaults=false` -- `--make-iptables-util-chains=false` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` - `--event-qps=0` **Remediation** diff --git a/content/rancher/v2.x/en/security/hardening-2.2/_index.md b/content/rancher/v2.x/en/security/hardening-2.2/_index.md index 2f73b7c0be2..f6d24831f25 100644 --- a/content/rancher/v2.x/en/security/hardening-2.2/_index.md +++ b/content/rancher/v2.x/en/security/hardening-2.2/_index.md @@ -385,8 +385,8 @@ Inspect the Kubelet containers on all hosts and verify that they are running wit - `--streaming-connection-idle-timeout=` - `--authorization-mode=Webhook` -- `--protect-kernel-defaults=false` -- `--make-iptables-util-chains=false` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` - `--event-qps=0` - `--anonymous-auth=false` - `--feature-gates="RotateKubeletServerCertificate=true"` diff --git a/content/rancher/v2.x/en/security/hardening-2.3/_index.md b/content/rancher/v2.x/en/security/hardening-2.3/_index.md new file mode 100644 index 00000000000..ac8f578efcd --- /dev/null +++ b/content/rancher/v2.x/en/security/hardening-2.3/_index.md @@ -0,0 +1,1430 @@ +--- +title: Hardening Guide - Rancher v2.3.x +weight: 100 +--- + +### Hardening Guide for Rancher 2.3.x with Kubernetes 1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf) + +### Overview + +This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.x with Kubernetes v1.15. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{< baseurl >}}/rancher/v2.x/en/security/benchmark-2.3/). + +### Profile Definitions + +The following profile definitions agree with the CIS benchmarks for Kubernetes. + +A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. 
+ +#### Level 1 + +Items in this profile intend to: + +- offer practical advice appropriate for the environment; +- deliver an obvious security benefit; and +- not alter the functionality or utility of the environment beyond an acceptable margin + +#### Level 2 + +Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: + +- are intended for use in environments or use cases where security is paramount +- act as a defense in depth measure +- may negatively impact the utility or performance of the technology + +--- + +## 1.1 - Rancher HA Kubernetes cluster host configuration + +### 1.1.1 - Configure default sysctl settings on all hosts + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure sysctl settings to match what the kubelet would set if allowed. + +**Rationale** + +We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. + +This supports the following control: + +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) + +**Audit** + +- Verify `vm.overcommit_memory = 1` + +``` bash +sysctl vm.overcommit_memory +``` + +- Verify `kernel.panic = 10` + +``` bash +sysctl kernel.panic +``` + +- Verify `kernel.panic_on_oops = 1` + +``` bash +sysctl kernel.panic_on_oops +``` + +**Remediation** + +- Set the following parameters in `/etc/sysctl.conf` on all nodes: + +``` plain +vm.overcommit_memory=1 +kernel.panic=10 +kernel.panic_on_oops=1 +``` + +- Run `sysctl -p` to enable the settings. + +### 1.1.2 - Install the encryption provider configuration on all control plane nodes + +**Profile Applicability** + +- Level 1 + +**Description** + +Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: + +**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` + +**Rationale** + +This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. + +This supports the following controls: + +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) + +**Audit** + +On the control plane hosts for the Rancher HA cluster run: + +``` bash +stat /opt/kubernetes/encryption.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. 
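Before moving on to remediation, the mode, ownership, and configured provider can be confirmed in a single pass. The snippet below is a sketch only; it assumes the file lives at the path used throughout this section.

``` bash
# Spot-check the encryption config in one pass (sketch)
stat -c 'mode=%a owner=%U:%G' /opt/kubernetes/encryption.yaml   # expect mode=600 owner=root:root
grep -E 'aescbc|identity' /opt/kubernetes/encryption.yaml       # the aescbc provider should be listed before identity
```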
+ +**Remediation** + +- Generate a key and an empty configuration file: + +``` bash +head -c 32 /dev/urandom | base64 -i - +touch /opt/kubernetes/encryption.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/encryption.yaml +chmod 0600 /opt/kubernetes/encryption.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `secret` is the 32-byte base64-encoded string generated in the first step. + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.3 - Install the audit log configuration on all control plane nodes. + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. + +**Rationale** + +The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. + +This supports the following controls: + +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) + +**Audit** + +On each control plane node, run: + +``` bash +stat /opt/kubernetes/audit.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/audit.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/audit.yaml +chmod 0600 /opt/kubernetes/audit.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.4 - Place Kubernetes event limit configuration on each control plane host + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. + +**Rationale** + +Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters.
+ +This supports the following control: + +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Audit** + +On nodes with the `controlplane` role run: + +``` bash +stat /opt/kubernetes/admission.yaml +stat /opt/kubernetes/event.yaml +``` + +For each file, ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` + +For `admission.yaml` ensure that the file contains: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +For `event.yaml` ensure that the file contains: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/admission.yaml +touch /opt/kubernetes/event.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/admission.yaml +chown root:root /opt/kubernetes/event.yaml +chmod 0600 /opt/kubernetes/admission.yaml +chmod 0600 /opt/kubernetes/event.yaml +``` + +- For `admission.yaml` set the contents to: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +- For `event.yaml` set the contents to: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory has permissions of 700 or more restrictive. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. + +**Audit** + +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %a /var/lib/rancher/etcd +``` + +Verify that the permissions are `700` or more restrictive. + +**Remediation** + +Follow the steps as documented in [1.4.12]({{< baseurl >}}/rancher/v2.x/en/security/hardening-2.3/#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. + +### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory ownership is set to `etcd:etcd`. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. 
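As a combined spot-check for this control and 1.4.11, the data directory can be read from the running etcd container and its mode and ownership verified together. This is a sketch only; it assumes `--data-dir` is passed as a single `key=value` argument and falls back to the default RKE path `/var/lib/rancher/etcd`.

``` bash
# Locate the etcd data directory from the container args, then check
# permissions (1.4.11) and ownership (1.4.12) in one step.
DATA_DIR=$(docker inspect etcd --format '{{ range .Args }}{{ println . }}{{ end }}' | awk -F= '/--data-dir/ {print $2}')
stat -c 'mode=%a owner=%U:%G' "${DATA_DIR:-/var/lib/rancher/etcd}"
```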
+ +**Audit** + +On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %U:%G /var/lib/rancher/etcd +``` + +Verify that the ownership is set to `etcd:etcd`. + +**Remediation** + +- On the etcd server node(s) add the `etcd` user: + +``` bash +useradd etcd +``` + +Record the uid/gid: + +``` bash +id etcd +``` + +- Add the following to the RKE `cluster.yml` etcd section under `services`: + +``` yaml +services: + etcd: + uid: + gid: +``` + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix A. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. + +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + extra_args: + authorization-mode: "Webhook" + streaming-connection-idle-timeout: "" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. 
+ +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. + +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" +--encryption-provider-config=/opt/kubernetes/encryption.yaml +--admission-control-config-file=/opt/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=5 +--audit-log-maxbackup=5 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/opt/kubernetes/audit.yaml +--tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: 
"ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 2.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + … + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole psp:restricted +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding psp:restricted +``` + +- Verify the restricted PSP is present. 
+ +``` bash +kubectl get psp restricted +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. + +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. 
+ +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- While upgrading or installing Rancher 2.3.x, provide the following flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local admin password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local admin password should be changed from the default. + +**Rationale** + +The default admin password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. + +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. + +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. 
+ +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. 
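For installations where auditing the UI is impractical, the node driver list can also be pulled from the Rancher API in the same style as the administrator audit script in 3.3.1. The endpoint and field names below (`/v3/nodeDrivers`, `active`) are assumptions based on the v3 API and should be verified against your Rancher version.

``` bash
#!/bin/bash
# Sketch: list node drivers and whether each one is active, using the same
# token and URL placeholders as the script in section 3.3.1.
curl -sk -u 'token-:' https:///v3/nodeDrivers | jq -r '.data[] | "\(.name) active=\(.active)"'
```

Any driver reported as `active=true` that is not on the approved list should be disabled.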
+ +--- + +## Appendix A - Complete RKE `cluster.yml` Example + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] + +services: + kubelet: + extra_args: + streaming-connection-idle-timeout: "1800s" + authorization-mode: "Webhook" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + services: + etcd: + uid: 1001 + gid: 1001 +addons: | + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: 
rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +## Appendix B - Complete RKE Template Example + +``` yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + ignore_docker_version: true +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1001 + retention: 72h + snapshot: false + uid: 1001 + kube_api: + always_pull_images: false + extra_args: + admission-control-config-file: /opt/kubernetes/admission.yaml + anonymous-auth: 'false' + audit-log-format: json + audit-log-maxage: '5' + audit-log-maxbackup: '5' + audit-log-maxsize: '100' + audit-log-path: /var/log/kube-audit/audit-log.json + audit-policy-file: /opt/kubernetes/audit.yaml + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy + encryption-provider-config: /opt/kubernetes/encryption.yaml + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + 
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/var/log/kube-audit:/var/log/kube-audit' + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + anonymous-auth: 'false' + event-qps: '0' + feature-gates: RotateKubeletServerCertificate=true + make-iptables-util-chains: 'true' + protect-kernel-defaults: 'true' + streaming-connection-idle-timeout: 1800s + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md index 1627ac79164..0c73699ee9f 100644 --- a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/_index.md @@ -1,536 +1,18 @@ --- -title: Kubernetes components +title: Kubernetes Components weight: 100 --- -The commands/steps listed on this page apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. +The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) clusters. -## Diagram +This section includes troubleshooting tips in the following categories: + +- [Troubleshooting etcd Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd) +- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane) +- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy) +- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic) + +# Kubernetes Component Diagram ![Cluster diagram]({{< baseurl >}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -## etcd - -This section applies to nodes with the `etcd` role. - -### Is etcd container is running - -The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name=etcd$ -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd -``` - -### etcd container logging - -The logging of the container can contain information on what the problem could be. - -``` -docker logs etcd -``` - -* `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` - -A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. - -* `xxx is starting a new election at term x` - -The etcd cluster has lost it's quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. - -* `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` - -The host firewall is preventing network communication. - -* `rafthttp: request cluster ID mismatch` - -The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. - -* `rafthttp: failed to find member` - -The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. - -### etcd cluster and connectivity checks - -The address where etcd is listening depends on the address configuration of the host etcd is running on. If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster) - -* Check etcd members on all nodes - -Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes. - -Command: -``` -docker exec etcd etcdctl member list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list" -``` - -Example output: -``` -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 -``` - -* Check endpoint status - -The values for `RAFT TERM` should be equal and `RAFT INDEX` should be not be too far apart from each other. 
- -Command: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | 333ef673fc4add56 | 3.2.18 | 24 MB | false | 72 | 66887 | -| https://IP:2379 | 5feed52d940ce4cf | 3.2.18 | 24 MB | true | 72 | 66887 | -| https://IP:2379 | db6b3bdb559a848d | 3.2.18 | 25 MB | false | 72 | 66887 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -* Check endpoint health - -Command: -``` -docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Example output: -``` -https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms -https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms -``` - -* Check connectivity on port TCP/2379 - -Requires the `curl` binary on the node. - -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - curl -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health"; -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - curl -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health"; -done -``` - -If you are running on an operating system without `curl` (for example, RancherOS), you can use the following command which uses a Docker container to run the `curl` command. 
- -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health" - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do - echo "Validating connection to ${endpoint}/health"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" -done -``` - -Example output: -``` -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -Validating connection to https://IP:2379/health -{"health": "true"} -``` - -* Check connectivity on port TCP/2380 - -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - curl --http1.1 -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version"; -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - curl --http1.1 -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version"; -done -``` - -If you are running on an operating system without `curl` (for example, RancherOS), you can use the following command which uses a Docker container to run the `curl` command. 
- -Command: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do - echo "Validating connection to ${endpoint}/version"; - docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" -done -``` - -Example output: -``` -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -Validating connection to https://IP:2380/version -{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} -``` - -### etcd alarms - -etcd will trigger alarms, for instance when it runs out of space. - -Command: -``` -docker exec etcd etcdctl alarm list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output when NOSPACE alarm is triggered: -``` -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -``` - -### etcd space errors - -Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. 
- -Resolution: - -* Compact the keyspace - -Command: -``` -rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') -docker exec etcd etcdctl compact "$rev" -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" -``` - -Example output: -``` -compacted revision xxx -``` - -* Defrag all etcd members - -Command: -``` -docker exec etcd etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")" -``` - -Example output: -``` -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -Finished defragmenting etcd member[https://IP:2379] -``` - -* Check endpoint status - -Command: -``` -docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" -``` - -Example output: -``` -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | -| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -``` - -* Disarm alarm - -After verifying that the DB size went down after compaction and defragmenting, the alarm needs to be disarmed for etcd to allow writes again. 
- -Command: -``` -docker exec etcd etcdctl alarm list -docker exec etcd etcdctl alarm disarm -docker exec etcd etcdctl alarm list -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm disarm" -docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" -``` - -Example output: -``` -docker exec etcd etcdctl alarm list -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -memberID:x alarm:NOSPACE -docker exec etcd etcdctl alarm disarm -docker exec etcd etcdctl alarm list -``` - -### Log level - -The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. - -Command: -``` -curl -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -curl -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv $ETCDCTL_ENDPOINT)/config/local/log -``` - -To reset the log level back to the default (`INFO`), you can use the following command. - -Command: -``` -curl -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -curl -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv $ETCDCTL_ENDPOINT)/config/local/log -``` - -### etcd content - -If you want to investigate the contents of your etcd, you can either watch streaming events or you can query etcd directly, see below for examples. - -* Watch streaming events - -Command: -``` -docker exec etcd etcdctl watch --prefix /registry -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT watch --prefix /registry -``` - -If you only want to see the affected keys (and not the binary data), you can append `| grep -a ^/registry` to the command to filter for keys only. 
- -* Query etcd directly - -Command: -``` -docker exec etcd etcdctl get /registry --prefix=true --keys-only -``` - -Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: -``` -docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT get /registry --prefix=true --keys-only -``` - -You can process the data to get a summary of count per key, using the command below: - -``` -docker exec etcd etcdctl get /registry --prefix=true --keys-only | grep -v ^$ | awk -F'/' '{ if ($3 ~ /cattle.io/) {h[$3"/"$4]++} else { h[$3]++ }} END { for(k in h) print h[k], k }' | sort -nr -``` - -## controlplane - -This section applies to nodes with the `controlplane` role. - -### Are the containers for controlplane running - -There are three specific containers launched on nodes with the `controlpane` role: - -* `kube-apiserver` -* `kube-controller-manager` -* `kube-scheduler` - -The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver -f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler -bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager -``` - -### controlplane container logging - -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kube-apiserver -docker logs kube-controller-manager -docker logs kube-scheduler -``` - -## nginx-proxy - -The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. - -### Is the container running - -The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name=nginx-proxy -``` - -Example output: - -``` -docker ps -a -f=name=nginx-proxy -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy -``` - -### Check generated NGINX configuration - -The generated configuration should include the IP addresses of the nodes with the `controlplane` role. 
The configuration can be checked using the following command: - -``` -docker exec nginx-proxy cat /etc/nginx/nginx.conf -``` - -Example output: -``` -error_log stderr notice; - -worker_processes auto; -events { - multi_accept on; - use epoll; - worker_connections 1024; -} - -stream { - upstream kube_apiserver { - - server ip_of_controlplane_node1:6443; - - server ip_of_controlplane_node2:6443; - - } - - server { - listen 6443; - proxy_pass kube_apiserver; - proxy_timeout 30; - proxy_connect_timeout 2s; - - } - -} -``` - -### nginx-proxy container logging - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs nginx-proxy -``` - -## worker and generic - -This section applies to every node as it includes components that run on nodes with any role. - -### Are the containers running - -There are three specific containers launched on nodes with the `controlpane` role: - -* kubelet -* kube-proxy - -The containers should have status `Up`. The duration shown after `Up` is the time the container has been running. - -``` -docker ps -a -f=name='kubelet|kube-proxy' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-proxy -a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet -``` - -### container logging - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kubelet -docker logs kube-proxy -``` +Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md new file mode 100644 index 00000000000..e3451e421ef --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/_index.md @@ -0,0 +1,40 @@ +--- +title: Troubleshooting Controlplane Nodes +weight: 2 +--- + +This section applies to nodes with the `controlplane` role. + +# Check if the Controlplane Containers are Running + +There are three specific containers launched on nodes with the `controlpane` role: + +* `kube-apiserver` +* `kube-controller-manager` +* `kube-scheduler` + +The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver +f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler +bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager +``` + +# Controlplane Container Logging + +> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{< baseurl >}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. 
+ +The logging of the containers can contain information on what the problem could be. + +``` +docker logs kube-apiserver +docker logs kube-controller-manager +docker logs kube-scheduler +``` \ No newline at end of file diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md new file mode 100644 index 00000000000..6cdb9fefdaa --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/etcd/_index.md @@ -0,0 +1,365 @@ +--- +title: Troubleshooting etcd Nodes +weight: 1 +--- + +This section contains commands and tips for troubleshooting nodes with the `etcd` role. + +This page covers the following topics: + +- [Checking if the etcd Container is Running](#checking-if-the-etcd-container-is-running) +- [etcd Container Logging](#etcd-container-logging) +- [etcd Cluster and Connectivity Checks](#etcd-cluster-and-connectivity-checks) + - [Check etcd Members on all Nodes](#check-etcd-members-on-all-nodes) + - [Check Endpoint Status](#check-endpoint-status) + - [Check Endpoint Health](#check-endpoint-health) + - [Check Connectivity on Port TCP/2379](#check-connectivity-on-port-tcp-2379) + - [Check Connectivity on Port TCP/2380](#check-connectivity-on-port-tcp-2380) +- [etcd Alarms](#etcd-alarms) +- [etcd Space Errors](#etcd-space-errors) +- [Log Level](#log-level) +- [etcd Content](#etcd-content) + - [Watch Streaming Events](#watch-streaming-events) + - [Query etcd Directly](#query-etcd-directly) +- [Replacing Unhealthy etcd Nodes](#replacing-unhealthy-etcd-nodes) + +# Checking if the etcd Container is Running + +The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name=etcd$ +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd +``` + +# etcd Container Logging + +The logging of the container can contain information on what the problem could be. + +``` +docker logs etcd +``` +| Log | Explanation | +|-----|------------------| +| `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` | A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. | +| `xxx is starting a new election at term x` | The etcd cluster has lost its quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. | +| `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` | The host firewall is preventing network communication. | +| `rafthttp: request cluster ID mismatch` | The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. | +| `rafthttp: failed to find member` | The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. + +# etcd Cluster and Connectivity Checks + +The address where etcd is listening depends on the address configuration of the host etcd is running on. 
If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster).
+
+### Check etcd Members on all Nodes
+
+Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes.
+
+Command:
+```
+docker exec etcd etcdctl member list
+```
+
+Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node:
+```
+docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list"
+```
+
+Example output:
+```
+xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001
+xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001
+xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001
+```
+
+### Check Endpoint Status
+
+The values for `RAFT TERM` should be equal and `RAFT INDEX` should not be too far apart from each other.
+
+Command:
+```
+docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table
+```
+
+Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node:
+```
+docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table
+```
+
+Example output:
+```
++-----------------+------------------+---------+---------+-----------+-----------+------------+
+| ENDPOINT        | ID               | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
++-----------------+------------------+---------+---------+-----------+-----------+------------+
+| https://IP:2379 | 333ef673fc4add56 | 3.2.18  | 24 MB   | false     | 72        | 66887      |
+| https://IP:2379 | 5feed52d940ce4cf | 3.2.18  | 24 MB   | true      | 72        | 66887      |
+| https://IP:2379 | db6b3bdb559a848d | 3.2.18  | 25 MB   | false     | 72        | 66887      |
++-----------------+------------------+---------+---------+-----------+-----------+------------+
+```
+
+### Check Endpoint Health
+
+Command:
+```
+docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")
+```
+
+Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node:
+```
+docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")
+```
+
+Example output:
+```
+https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms
+https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms
+https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms
+```
+
+### Check Connectivity on Port TCP/2379
+
+Command:
+```
+for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do
+  echo "Validating connection to ${endpoint}/health"
+  docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n"
--cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" +done +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do + echo "Validating connection to ${endpoint}/health"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" +done +``` + +Example output: +``` +Validating connection to https://IP:2379/health +{"health": "true"} +Validating connection to https://IP:2379/health +{"health": "true"} +Validating connection to https://IP:2379/health +{"health": "true"} +``` + +### Check Connectivity on Port TCP/2380 + +Command: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do + echo "Validating connection to ${endpoint}/version"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" +done +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do + echo "Validating connection to ${endpoint}/version"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" +done +``` + +Example output: +``` +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +``` + +# etcd Alarms + +etcd will trigger alarms, for instance when it runs out of space. + +Command: +``` +docker exec etcd etcdctl alarm list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +``` + +Example output when NOSPACE alarm is triggered: +``` +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +``` + +# etcd Space Errors + +Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. 
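+
+Before compacting, it can help to confirm that the database size has actually reached the backend quota. The sketch below is an assumption-based quick check: it reads the DB size that etcd reports and greps the container definition for a `--quota-backend-bytes` flag (if no such flag was passed when the container was created, etcd falls back to its built-in default of 2 GB).
+```
+# Show the DB size reported by the local etcd member
+docker exec etcd etcdctl endpoint status --write-out table
+# Show the configured backend quota, if one was passed to the container
+docker inspect etcd | grep quota-backend-bytes
+```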
+ +Resolutions: + +- [Compact the Keyspace](#compact-the-keyspace) +- [Defrag All etcd Members](#defrag-all-etcd-members) +- [Check Endpoint Status](#check-endpoint-status) +- [Disarm Alarm](#disarm-alarm) + +### Compact the Keyspace + +Command: +``` +rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') +docker exec etcd etcdctl compact "$rev" +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" +``` + +Example output: +``` +compacted revision xxx +``` + +### Defrag All etcd Members + +Command: +``` +docker exec etcd etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")" +``` + +Example output: +``` +Finished defragmenting etcd member[https://IP:2379] +Finished defragmenting etcd member[https://IP:2379] +Finished defragmenting etcd member[https://IP:2379] +``` + +### Check Endpoint Status + +Command: +``` +docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" +``` + +Example output: +``` ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | +| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | +| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +``` + +### Disarm Alarm + +After verifying that the DB size went down after compaction and defragmenting, the alarm needs to be disarmed for etcd to allow writes again. 
+ +Command: +``` +docker exec etcd etcdctl alarm list +docker exec etcd etcdctl alarm disarm +docker exec etcd etcdctl alarm list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm disarm" +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +``` + +Example output: +``` +docker exec etcd etcdctl alarm list +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +docker exec etcd etcdctl alarm disarm +docker exec etcd etcdctl alarm list +``` + +# Log Level + +The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. + +Command: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log +``` + +To reset the log level back to the default (`INFO`), you can use the following command. + +Command: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log +``` + +# etcd Content + +If you want to investigate the contents of your etcd, you can either watch streaming events or you can query etcd directly, see below for examples. 
+ +### Watch Streaming Events + +Command: +``` +docker exec etcd etcdctl watch --prefix /registry +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT watch --prefix /registry +``` + +If you only want to see the affected keys (and not the binary data), you can append `| grep -a ^/registry` to the command to filter for keys only. + +### Query etcd Directly + +Command: +``` +docker exec etcd etcdctl get /registry --prefix=true --keys-only +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT get /registry --prefix=true --keys-only +``` + +You can process the data to get a summary of count per key, using the command below: + +``` +docker exec etcd etcdctl get /registry --prefix=true --keys-only | grep -v ^$ | awk -F'/' '{ if ($3 ~ /cattle.io/) {h[$3"/"$4]++} else { h[$3]++ }} END { for(k in h) print h[k], k }' | sort -nr +``` + +# Replacing Unhealthy etcd Nodes + +When a node in your etcd cluster becomes unhealthy, the recommended approach is to fix or remove the failed or unhealthy node before adding a new etcd node to the cluster. diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md new file mode 100644 index 00000000000..70505e96280 --- /dev/null +++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md @@ -0,0 +1,69 @@ +--- +title: Troubleshooting nginx-proxy +weight: 3 +--- + +The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. + +# Check if the Container is Running + +The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. + +``` +docker ps -a -f=name=nginx-proxy +``` + +Example output: + +``` +docker ps -a -f=name=nginx-proxy +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy +``` + +# Check Generated NGINX Configuration + +The generated configuration should include the IP addresses of the nodes with the `controlplane` role. The configuration can be checked using the following command: + +``` +docker exec nginx-proxy cat /etc/nginx/nginx.conf +``` + +Example output: +``` +error_log stderr notice; + +worker_processes auto; +events { + multi_accept on; + use epoll; + worker_connections 1024; +} + +stream { + upstream kube_apiserver { + + server ip_of_controlplane_node1:6443; + + server ip_of_controlplane_node2:6443; + + } + + server { + listen 6443; + proxy_pass kube_apiserver; + proxy_timeout 30; + proxy_connect_timeout 2s; + + } + +} +``` + +# nginx-proxy Container Logging + +The logging of the containers can contain information on what the problem could be. 
+
+```
+docker logs nginx-proxy
+```
\ No newline at end of file
diff --git a/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md
new file mode 100644
index 00000000000..2f92ed8b9b2
--- /dev/null
+++ b/content/rancher/v2.x/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md
@@ -0,0 +1,35 @@
+---
+title: Troubleshooting Worker Nodes and Generic Components
+weight: 4
+---
+
+This section applies to every node as it includes components that run on nodes with any role.
+
+# Check if the Containers are Running
+
+There are two specific containers launched on nodes with any role:
+
+* kubelet
+* kube-proxy
+
+The containers should have status `Up`. The duration shown after `Up` is the time the container has been running.
+
+```
+docker ps -a -f=name='kubelet|kube-proxy'
+```
+
+Example output:
+```
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-proxy
+a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet
+```
+
+# Container Logging
+
+The logging of the containers can contain information on what the problem could be.
+
+```
+docker logs kubelet
+docker logs kube-proxy
+```
\ No newline at end of file
diff --git a/content/rancher/v2.x/en/troubleshooting/networking/_index.md b/content/rancher/v2.x/en/troubleshooting/networking/_index.md
index 75173ccd167..a8c5f64a6a5 100644
--- a/content/rancher/v2.x/en/troubleshooting/networking/_index.md
+++ b/content/rancher/v2.x/en/troubleshooting/networking/_index.md
@@ -9,7 +9,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG
### Double check if all the required ports are opened in your (host) firewall
-Double check if all the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP.
+Double check if all the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP.
### Check if overlay network is functioning correctly
@@ -45,7 +45,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition
2. Launch it using `kubectl create -f ds-overlaytest.yml`
3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`.
-4. Run the following command to let each container on every host ping each other (it's a single line command).
+4. Run the following command, from the same location, to let each container on every host ping each other (it's a single line bash command).
``` echo "=> Start network overlay test"; kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End network overlay test" @@ -58,7 +58,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition => End network overlay test ``` -If you see error in the output, that means that the [required ports]({{< baseurl >}}/rancher/v2.x/en/installation/references/) for overlay networking are not opened between the hosts indicated. +If you see error in the output, that means that the [required ports]({{}}/rancher/v2.x/en/cluster-provisioning/node-requirements/#networking-requirements/) for overlay networking are not opened between the hosts indicated. Example error output of a situation where NODE1 had the UDP ports blocked. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/_index.md index c2abe862d19..f61331fcc6b 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/_index.md @@ -2,7 +2,7 @@ title: Upgrades weight: 1005 --- -This section contains information about how to upgrade your Rancher server to a newer version. Regardless if you installed in an air gap environment or not, the upgrade steps will be based on what type of install you chosen. Select from the following options: +This section contains information about how to upgrade your Rancher server to a newer version. Regardless if you installed in an air gap environment or not, the upgrade steps mainly depend on whether you have a single node or high-availability installation of Rancher. Select from the following options: - [Upgrading a Single Node Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/single-node/) - [Upgrading an HA Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha/) diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md index 7b899e1d63e..a744cda7ff0 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha/_index.md @@ -7,32 +7,32 @@ aliases: - /rancher/v2.x/en/upgrades/air-gap-upgrade/ --- -The following instructions will guide you through upgrading a high-availability (HA) Rancher server installation. +The following instructions will guide you through using Helm to upgrade a high-availability (HA) Rancher server installation. ->**Note:** If you installed Rancher using the RKE Add-on yaml, following the directions to [migrate or upgrade]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{< baseurl >}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. +If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on). 
>**Note:** [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.x/en/installation/options/upgrading-cert-manager) -## Prerequisites +# Prerequisites - **Review the [Known Upgrade Issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/#known-upgrade-issues) and [Caveats]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/#caveats)** - -- **[Air Gap Installs Only:]({{< baseurl >}}/rancher/v2.x/en/installations/air-gap/) Collect and Populate Images for the new Rancher server version** +- **[Air Gap Installs Only:]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap/) Collect and Populate Images for the new Rancher server version** Follow the guide to [populate your private registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. -## Upgrade Outline +# Upgrade Outline Follow the steps to upgrade Rancher server: -- A. Backup your Kubernetes Cluster that is running Rancher server -- B. Update the Helm chart repository -- C. Upgrade Rancher -- D. Verify the Upgrade +- [A. Back up your Kubernetes cluster that is running Rancher server](#a-backup-your-kubernetes-cluster-that-is-running-rancher-server) +- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) +- [C. Upgrade Rancher](#c-upgrade-rancher) +- [D. Verify the Upgrade](#d-verify-the-upgrade) -### A. Backup your Kubernetes Cluster that is running Rancher server +### A. Back up Your Kubernetes Cluster that is Running Rancher Server [Take a one-time snapshot]({{< baseurl >}}/rancher/v2.x/en/backups/backups/ha-backups/#option-b-one-time-snapshots) of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restoration point if something goes wrong during upgrade. @@ -72,10 +72,7 @@ of your Kubernetes cluster running Rancher server. You'll use the snapshot as a ### C. Upgrade Rancher -Choose from the following options: - -* HA Upgrade -* HA Upgrade for Air Gap Installs +This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. {{% tabs %}} {{% tab "HA Upgrade" %}} diff --git a/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md index f48c1ad0c54..eee276b3c24 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/single-node/_index.md @@ -13,7 +13,7 @@ The following instructions will guide you through upgrading a high-availability - **Review the [Known Upgrade Issues]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/#known-upgrade-issues) and [Caveats]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/#caveats)** -- **[Air Gap Installs Only:]({{< baseurl >}}/rancher/v2.x/en/installations/air-gap/) Collect and Populate Images for the new Rancher server version** +- **[Air Gap Installs Only:]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap/) Collect and Populate Images for the new Rancher server version** Follow the guide to [populate your private registry]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. 
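+
+For illustration, populating the registry typically uses the image scripts that ship with each Rancher release. This is only a sketch under assumptions: the asset names (`rancher-images.txt`, `rancher-save-images.sh`, `rancher-load-images.sh`) come from the release you are upgrading to, and `registry.example.com` is a placeholder for your private registry; follow the linked guide for the authoritative steps.
+```
+# On a host with internet access: pull the release images and archive them
+./rancher-save-images.sh --image-list ./rancher-images.txt
+# On a host that can reach the private registry: push the archived images
+./rancher-load-images.sh --image-list ./rancher-images.txt --registry registry.example.com
+```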
diff --git a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md index 9c10255eba0..90112383200 100644 --- a/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/discover-services/_index.md @@ -7,6 +7,10 @@ Service discovery is one of the core functionalities of any container-based envi This document will also show you how to link the workloads and services that you migrated into Rancher v2.x. When you parsed your services from v1.6 using migration-tools CLI, it output two files for each service: one deployment manifest and one service manifest. You'll have to link these two files together before the deployment works correctly in v2.x. +
Resolve the output.txt Link Directive
+ +![Resolve Link Directive]({{< baseurl >}}/img/rancher/resolve-links.png) + ## In This Document @@ -58,14 +62,22 @@ When you migrate v1.6 services to v2.x, Rancher does not automatically create a In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/#migration-example-files) are linked together. +
Linked Workload and Kubernetes Service
+ +![Linked Workload and Kubernetes Service]({{< baseurl >}}/img/rancher/linked-service-workload.png) + + ### Service Name Alias Creation Just as you can create an alias for Rancher v1.6 services, you can do the same for Rancher v2.x workloads. Similarly, you can also create DNS records pointing to services running externally, using either their hostname or IP address. These DNS records are Kubernetes service objects. -Using the v2.x UI, use the context menu to navigate to the `Project` view and choose the **Service Discovery** tab. All existing DNS records created for your workloads are listed under each namespace. +Using the v2.x UI, use the context menu to navigate to the `Project` view. Then click **Resources > Workloads > Service Discovery.** (In versions prior to v2.3.0, click the **Workloads > Service Discovery** tab.) All existing DNS records created for your workloads are listed under each namespace. Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods. +
Add Service Discovery Record
+![Add Service Discovery Record]({{< baseurl >}}/img/rancher/add-record.png) + The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. Option | Kubernetes-implemented? | Rancher-implemented? diff --git a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md index 5315469581a..3896fe2087f 100644 --- a/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/expose-services/_index.md @@ -61,7 +61,7 @@ For example, for the web-deployment.yml file parsed from v1.6 that we've been us
Port Mapping: Setting HostPort
-![Set HostPort]({{< baseurl >}}/img/rancher/set-hostport.gif) +{{< img "/img/rancher/set-hostport.gif" "Set HostPort">}} ## NodePort diff --git a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md index f5a1b6147d9..2f6a6237e85 100644 --- a/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/get-started/_index.md @@ -40,6 +40,10 @@ After provisioning your node(s), install Rancher: After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication). +
Rancher v2.x Authentication
+ +![Rancher v2.x Authentication]({{< baseurl >}}/img/rancher/auth-providers.svg) + ### Local Users Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{< baseurl >}}/rancher/v2.x/en/admin-settings/authentication/) and assign them access rights. diff --git a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md index b2996c8d41f..aa76e842de3 100644 --- a/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/load-balancing/_index.md @@ -9,6 +9,10 @@ As outlined in [its documentation]({{< baseurl >}}/rancher/v1.6/en/cattle/adding If you encounter the `output.txt` text below after parsing your v1.6 Compose files to Kubernetes manifests, you'll have to resolve it by manually creating a load balancer in v2.x. +
output.txt Load Balancer Directive
+ +![Resolve Load Balancer Directive]({{< baseurl >}}/img/rancher/resolve-load-balancer.png) + ## In This Document @@ -35,7 +39,7 @@ In Rancher v1.6, you could add port/service rules for configuring your HAProxy t Rancher v2.x offers similar functionality, but load balancing is instead handled by Ingress. An Ingress is a specification of rules that a controller component applies to your load balancer. The actual load balancer can run outside of your cluster or within it. -By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only, but you can read about cloud providers in [our documentation]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers/). +By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only. RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. @@ -49,8 +53,16 @@ In Rancher v1.6 you could deploy a scalable load balancer service within your st +
Rancher v1.6 Load Balancing Architecture
+ +![Rancher v1.6 Load Balancing]({{< baseurl >}}/img/rancher/cattle-load-balancer.svg) + The Rancher v2.x Ingress Controller is a DaemonSet, it is globally deployed on all schedulable nodes to serve your entire Kubernetes Cluster. Therefore, when you program the Ingress rules, you must use a unique hostname and path to point to your workloads, as the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads. +
Rancher v2.x Load Balancing Architecture
+ +![Rancher v2.x Load Balancing]({{< baseurl >}}/img/rancher/kubernetes-load-balancer.svg) + ## Ingress Caveats Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balancing, you must use unique host names and paths when configuring your workloads. This limitation derives from: @@ -62,12 +74,18 @@ Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balan ## Deploying Ingress -You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Workloads** from the main menu. Then choose the **Load Balancing** tab and begin by clicking **Deploy**. During deployment, you can choose a target project or namespace. +You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Resources > Workloads > Load Balancing.** (In versions prior to v2.3.0, click **Workloads > Load Balancing.**) Then click **Deploy**. During deployment, you can choose a target project or namespace. >**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. > -For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and then select the **Load Balancing** tab. Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects. +![Workload Scale]({{< baseurl >}}/img/rancher/workload-scale.png) + +For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions prior to v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects. + +
Browsing to Load Balancer Tab and Adding Ingress
+ +![Adding Ingress]({{< baseurl >}}/img/rancher/add-ingress.gif) Similar to a service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. The sections below demonstrate how to create Ingress rules. @@ -77,8 +95,16 @@ Using Rancher v2.x, you can add Ingress rules that are based on host names or a For example, let's say you have multiple workloads deployed to a single namespace. You can add an Ingress to route traffic to these two workloads using the same hostname but different paths, as depicted in the image below. URL requests to `foo.com/name.html` will direct users to the `web` workload, and URL requests to `foo.com/login` will direct users to the `chat` workload. +
Ingress: Path-Based Routing Configuration
+ +![Ingress: Path-Based Routing Configuration]({{< baseurl >}}/img/rancher/add-ingress-form.png) + Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address. +
Workload Links
+ +![Load Balancer Links to Workloads]({{< baseurl >}}/img/rancher/load-balancer-links.png) + The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use `kubectl` command below to see the Ingress address assigned by the controller: ``` @@ -92,6 +118,10 @@ Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want - We recommend [uploading a certificate]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. - If you have configured [NGINX default certificate]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. +
Load Balancer Configuration: SSL/TLS Certificate Section
+ +![SSL/TLS Certificates Section]({{< baseurl >}}/img/rancher/load-balancer-ssl-certs.png) + ### TCP Load Balancing Options #### Layer-4 Load Balancer @@ -100,6 +130,10 @@ For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`. +
Workload Deployment: Layer 4 Load Balancer Creation
+ +![Deploy Layer-4 Load Balancer]({{< baseurl >}}/img/rancher/deploy-workload-load-balancer.png) + Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. #### NGINX Ingress Controller TCP Support by ConfigMaps @@ -110,6 +144,8 @@ However, there is a workaround to use NGINX's TCP balancing by creating a Kubern To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. +![Layer-4 Load Balancer: ConfigMap Workaround]({{< baseurl >}}/img/rancher/layer-4-lb-config-map.png) + The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. ## Rancher v2.x Load Balancing Limitations diff --git a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md index ad16e2cac2c..d018c49e4e4 100644 --- a/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md +++ b/content/rancher/v2.x/en/v1.6-migration/monitor-apps/_index.md @@ -11,6 +11,10 @@ Use this document to correct Rancher v2.x workloads and services that list `heal For example, for the image below, we would configure liveness probes for the `web` and `weblb` workloads (i.e., the Kubernetes manifests output by migration-tools CLI). +
Resolve health_check for the web and webLB Workloads
+ +![Resolve health_check]({{< baseurl >}}/img/rancher/resolve-health-checks.png) + ## In This Document @@ -37,6 +41,8 @@ The health check microservice features two types of health checks, which have a The following diagram displays the health check microservice evaluating a container running Nginx. Notice that the microservice is making its check across nodes. +![Rancher v1.6 Health Checks]({{}}/img/rancher/healthcheck.svg) + ## Rancher v2.x Health Checks In Rancher v2.x, the health check microservice is replaced with Kubernete's native health check mechanisms, called _probes_. These probes, similar to the Rancher v1.6 health check microservice, monitor the health of pods over TCP and HTTP. @@ -63,6 +69,8 @@ Kubernetes includes two different _types_ of probes: liveness checks and readine The following diagram displays kubelets running probes on containers they are monitoring ([kubelets](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) are the primary "agent" running on each node). The node on the left is running a liveness probe, while the one of the right is running a readiness check. Notice that the kubelet is scanning containers on its host node rather than across nodes, as in Rancher v1.6. +![Rancher v2.x Probes]({{}}/img/rancher/probes.svg) + ## Configuring Probes in Rancher v2.x The [migration-tool CLI]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files to Kubernetes manifest. Therefore, if want you to add health checks to your Rancher v2.x workloads, you'll have to add them manually. @@ -73,6 +81,10 @@ If the probe fails, the container is restarted per the restartPolicy defined in Configure probes by using the **Health Check** section while editing deployments called out in `output.txt`. +
Edit Deployment: Health Check Section
+ +![Health Check Section]({{< baseurl >}}/img/rancher/health-check-section.png) + ### Configuring Checks While you create a workload using Rancher v2.x, we recommend configuring a check that monitors the health of the deployment's pods. @@ -85,6 +97,8 @@ TCP checks monitor your deployment's health by attempting to open a connection t You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). +![TCP Check]({{}}/img/rancher/readiness-check-tcp.png) + When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. +
Rancher v2.x: Workload Deployment
+ +![Workload Tab and Group by Node Icon]({{< baseurl >}}/img/rancher/schedule-specific-node.png) + Rancher schedules pods to the node you select if 1) there are compute resource available for the node and 2) you've configured port mapping to use the HostPort option, that there are no port conflicts. If you expose the workload using a NodePort that conflicts with another workload, the deployment gets created successfully, but no NodePort service is created. Therefore, the workload isn't exposed outside of the cluster. -After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the **Workloads** tab, click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. +After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions prior to v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. + +![Pods Scheduled to Same Node]({{< baseurl >}}/img/rancher/scheduled-nodes.png) ). A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. To create a daemonset while configuring a workload, choose **Run one pod on each node** from the **Workload Type** options. +
Workload Configuration: Choose run one pod on each node to configure daemonset
+ +![choose Run one pod on each node]({{< baseurl >}}/img/rancher/workload-type.png) + ### Scheduling Pods Using Resource Constraints While creating a service in the Rancher v1.6 UI, you could schedule its containers to hosts based on hardware requirements that you choose. The containers are then scheduled to hosts based on which ones have bandwidth, memory, and CPU capacity. @@ -204,6 +238,10 @@ To declare resource constraints, edit your migrated workloads, editing the **Sec - Memory Limit - CPU Limit +
Scheduling: Resource Constraint Settings
+ +![Resource Constraint Settings]({{< baseurl >}}/img/rancher/resource-constraint-settings.png) + You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). ### [Next: Service Discovery]({{< baseurl >}}/rancher/v2.x/en/v1.6-migration/discover-services/) diff --git a/content/rke/latest/en/_index.md b/content/rke/latest/en/_index.md index 9d5fa4cf5a9..edd0d014bee 100644 --- a/content/rke/latest/en/_index.md +++ b/content/rke/latest/en/_index.md @@ -4,4 +4,4 @@ shortTitle: RKE weight: 1 --- -Rancher Kubernetes Engine (RKE) is a light-weight Kubernetes installer that supports installation on bare-metal and virtualized servers. RKE solves a common issue in the Kubernetes community: installation complexity. With RKE, Kubernetes installation is simplified, regardless of what operating systems and platforms you're running. +Rancher Kubernetes Engine (RKE) is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. It works on bare-metal and virtualized servers. RKE solves the problem of installation complexity, a common issue in the Kubernetes community. With RKE, the installation and operation of Kubernetes is both simplified and easily automated, and it's entirely independent of the operating system and platform you're running. As long as you can run a supported version of Docker, you can deploy and run Kubernetes with RKE. diff --git a/content/rke/latest/en/config-options/_index.md b/content/rke/latest/en/config-options/_index.md index b942e5e6f3e..f8a920363e6 100644 --- a/content/rke/latest/en/config-options/_index.md +++ b/content/rke/latest/en/config-options/_index.md @@ -67,6 +67,11 @@ kubernetes_version: "v1.11.6-rancher1-1" In case both `kubernetes_version` and [system images]({{< baseurl >}}/rke/latest/en/config-options/system-images/) are defined, the system images configuration will take precedence over `kubernetes_version`. +> **Note:** In RKE, `kubernetes_version` is used to map the version of Kubernetes to the default services, parameters, and options: + +> - For RKE v0.3.0+, the service defaults are located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go). +> - For RKE prior to v0.3.0, the service defaults are located [here](https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go). Note: The version in the path of the service defaults file corresponds to a Rancher version. Therefore, for Rancher v2.1.x, [this file](https://github.com/rancher/types/blob/release/v2.1/apis/management.cattle.io/v3/k8s_defaults.go) should be used. + #### Listing Supported Kubernetes Versions Please refer to the [release notes](https://github.com/rancher/rke/releases) of the RKE version that you are running, to find the list of supported Kubernetes versions as well as the default Kubernetes version. diff --git a/content/rke/latest/en/config-options/add-ons/_index.md b/content/rke/latest/en/config-options/add-ons/_index.md index bf58a5b7f14..a665230b268 100644 --- a/content/rke/latest/en/config-options/add-ons/_index.md +++ b/content/rke/latest/en/config-options/add-ons/_index.md @@ -3,23 +3,20 @@ title: Add-Ons weight: 260 --- -RKE supports pluggable add-ons. Add-ons are used to deploy several cluster components including: +RKE supports configuring pluggable add-ons in the cluster YML. 
Add-ons are used to deploy several cluster components including: * [Network plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/) * [Ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) * [DNS provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) * [Metrics Server]({{< baseurl >}}/rke/latest/en/config-options/add-ons/metrics-server/) -The images used for these add-ons under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. +These add-ons require images that can be found under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. -In addition to these pluggable add-ons, you can specify an add-on that you want deployed after the cluster deployment is complete. - -RKE only adds additional add-ons when using `rke up` multiple times. RKE does **not** support removing of cluster add-ons when doing `rke up` with a different list of add-ons. - -As of v0.1.8, RKE will update an add-on if it is the same name. - -Prior to v0.1.8, update any add-ons by using `kubectl edit`. +There are a few things worth noting: +* In addition to these pluggable add-ons, you can specify an add-on that you want deployed after the cluster deployment is complete. +* As of v0.1.8, RKE will update an add-on if it has the same name. +* Prior to v0.1.8, update any add-ons by using `kubectl edit`. ## Critical and Non-Critical Add-ons diff --git a/content/rke/latest/en/config-options/add-ons/dns/_index.md b/content/rke/latest/en/config-options/add-ons/dns/_index.md index 636aa6aee2f..a00aa2e5a12 100644 --- a/content/rke/latest/en/config-options/add-ons/dns/_index.md +++ b/content/rke/latest/en/config-options/add-ons/dns/_index.md @@ -16,6 +16,8 @@ RKE provides the following DNS providers that can be deployed as add-ons: CoreDNS was made the default in RKE v0.2.5 when using Kubernetes 1.14 and higher. If you are using an RKE version lower than v0.2.5, kube-dns will be deployed by default. +> **Note:** If you switch from one DNS provider to another, the existing DNS provider will be removed before the new one is deployed. + # CoreDNS _Available as of v0.2.5_ diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md index b8f7a26ab82..cb26c78fe57 100644 --- a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md +++ b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md @@ -10,6 +10,8 @@ RKE provides the following network plug-ins that are deployed as add-ons: - Canal - Weave +> **Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn’t allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you to tear down the entire cluster and all its applications. + By default, the network plug-in is `canal`. If you want to use another network plug-in, you need to specify which network plug-in to enable at the cluster level in the `cluster.yml`.
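For orientation, here is a minimal sketch of the relevant `cluster.yml` fragments using the defaults named above (the `canal` plug-in, and `coredns` on RKE v0.2.5+ with Kubernetes 1.14+). The `network.plugin` key appears later in this page; the `dns.provider` key layout is assumed here for illustration.

```yaml
# Illustrative cluster.yml fragment; both values shown are the documented defaults.
network:
  plugin: canal      # one of: flannel, calico, canal, weave

dns:
  provider: coredns  # kube-dns is deployed by default on older RKE/Kubernetes versions
```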
```yaml @@ -20,7 +22,7 @@ network: The images used for network plug-ins are under the [`system_images` directive]({{< baseurl >}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each network plug-in, but these can be overridden by changing the image tag in `system_images`. -## Disabling deployment of a network plug-in +# Disabling Deployment of a Network Plug-in You can disable deploying a network plug-in by specifying `none` to the network `plugin` directive in the cluster configuration. @@ -29,11 +31,11 @@ network: plugin: none ``` -## Network Plug-in Options +# Network Plug-in Options Besides the different images that could be used to deploy network plug-ins, certain network plug-ins support additional options that can be used to customize the network plug-in. -### Canal Network Plug-in Options +## Canal Network Plug-in Options ```yaml network: @@ -48,7 +50,7 @@ network: By setting the `canal_iface`, you can configure the interface to use for inter-host communication. The `canal_flannel_backend_type` option allows you to specify the type of [flannel backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md) to use. By default the `vxlan` backend is used. -### Flannel Network Plug-in Options +## Flannel Network Plug-in Options ```yaml network: @@ -63,7 +65,7 @@ network: By setting the `flannel_iface`, you can configure the interface to use for inter-host communication. The `flannel_backend_type` option allows you to specify the type of [flannel backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md) to use. By default the `vxlan` backend is used. -### Calico Network Plug-in Options +## Calico Network Plug-in Options ```yaml network: @@ -80,7 +82,7 @@ Calico currently only supports 2 cloud providers, AWS or GCE, which can be set u - `aws` - `gce` -### Weave Network Plug-in Options +## Weave Network Plug-in Options ```yaml network: @@ -92,3 +94,8 @@ network: #### Weave encryption Weave encryption can be enabled by passing a string password to the network provider config. + + +## Custom Network Plug-ins + +It is possible to add a custom network plug-in by using the [user-defined add-on functionality]({{< baseurl >}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. In the `addons` field, you can add the add-on manifest of a cluster that has the network plug-in that you want, as shown in [this example]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example). diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md new file mode 100644 index 00000000000..d942be998aa --- /dev/null +++ b/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md @@ -0,0 +1,207 @@ +--- +title: Custom Network Plug-in Example +weight: 1 +--- + +The below example shows how to configure a custom network plug-in with an in-line add-on to the `cluster.yml`. + +First, to edit the network plug-ins, change the `network` section of the YAML from: + +``` +network: + options: + flannel_backend_type: "vxlan" + plugin: "canal" +``` +to: +``` +network: + plugin: none +``` + +Then, in the `addons` section of the `cluster.yml`, you can add the add-on manifest of a cluster that has the network plug-in that you want.
In the below example, we are replacing the Canal plugin with a Flannel plugin by adding the add-on manifest for the cluster through the `addons` field: + +``` +addons: |- + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel + subjects: + - kind: ServiceAccount + name: flannel + namespace: kube-system + --- + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: kube-flannel-cfg + namespace: "kube-system" + labels: + tier: node + app: flannel + data: + cni-conf.json: | + { + "name":"cbr0", + "cniVersion":"0.3.1", + "plugins":[ + { + "type":"flannel", + "delegate":{ + "forceAddress":true, + "isDefaultGateway":true + } + }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] + } + net-conf.json: | + { + "Network": "10.42.0.0/16", + "Backend": { + "Type": "vxlan" + } + } + --- + apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + name: kube-flannel + namespace: "kube-system" + labels: + tier: node + k8s-app: flannel + spec: + template: + metadata: + labels: + tier: node + k8s-app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: NotIn + values: + - windows + serviceAccountName: flannel + containers: + - name: kube-flannel + image: rancher/coreos-flannel:v0.10.0-rancher1 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 150m + memory: 64M + command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: install-cni + image: rancher/flannel-cni:v0.3.0-rancher1 + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: kube-flannel-cfg + key: cni-conf.json + - name: CNI_CONF_NAME + value: "10-flannel.conflist" + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: host-cni-bin + mountPath: /host/opt/cni/bin/ + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + - operator: Exists + effect: NoExecute + - key: node.kubernetes.io/not-ready + effect: NoSchedule + operator: Exists + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: host-cni-bin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: 20% + type: RollingUpdate + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: flannel + namespace: kube-system +``` +**Result:** The cluster is up with the custom network plug-in. 
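As a rough, hypothetical outline of how these pieces fit together in one `cluster.yml` (the node address and roles are placeholders; the full manifest from the example above goes under `addons`):

```yaml
# Hypothetical outline only; not a complete, working configuration.
nodes:
  - address: 1.1.1.1            # placeholder node address
    user: ubuntu
    role: [controlplane, etcd, worker]
network:
  plugin: none                  # disable the built-in network plug-ins
addons: |-
  ---
  # ...paste the custom network plug-in manifests here (e.g. the flannel example above)...
```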
\ No newline at end of file diff --git a/content/rke/latest/en/config-options/authorization/_index.md b/content/rke/latest/en/config-options/authorization/_index.md index 1bfd1f16084..6d40ca89548 100644 --- a/content/rke/latest/en/config-options/authorization/_index.md +++ b/content/rke/latest/en/config-options/authorization/_index.md @@ -5,7 +5,7 @@ weight: 240 Kubernetes supports multiple [Authorization Modules](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#authorization-modules). Currently, RKE only supports the [RBAC module](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). -By default, RBAC is already enabled. If you wanted to turn off RBAC support, **which isn't recommended**, you set the authorization mode to `none`. +By default, RBAC is already enabled. If you wanted to turn off RBAC support, **which isn't recommended**, you set the authorization mode to `none` in your `cluster.yml`. ```yaml authorization: diff --git a/content/rke/latest/en/config-options/bastion-host/_index.md b/content/rke/latest/en/config-options/bastion-host/_index.md index bade5d19232..4e0f04260d1 100644 --- a/content/rke/latest/en/config-options/bastion-host/_index.md +++ b/content/rke/latest/en/config-options/bastion-host/_index.md @@ -3,7 +3,7 @@ title: Bastion/Jump Host Configuration weight: 220 --- -Since RKE uses `ssh` to connect to [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/), you can configure to use a bastion host. Keep in mind that the [port requirements]({{< baseurl >}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. +Since RKE uses `ssh` to connect to [nodes]({{< baseurl >}}/rke/latest/en/config-options/nodes/), you can configure the `cluster.yml` so RKE will use a bastion host. Keep in mind that the [port requirements]({{< baseurl >}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. Your private SSH key(s) only needs to reside on the host running RKE. You do not need to copy your private SSH key(s) to the bastion host. ```yaml bastion_host: diff --git a/content/rke/latest/en/config-options/cloud-providers/aws/_index.md b/content/rke/latest/en/config-options/cloud-providers/aws/_index.md index 278a54b1230..9ab6c05524d 100644 --- a/content/rke/latest/en/config-options/cloud-providers/aws/_index.md +++ b/content/rke/latest/en/config-options/cloud-providers/aws/_index.md @@ -3,7 +3,7 @@ title: AWS Cloud Provider weight: 251 --- -To enable the AWS cloud provider, there are no configuration options. You only need to set the name as `aws`. In order to use the AWS cloud provider, all cluster nodes must have already been configured with an [appropriate IAM role](#iam-requirements) and your AWS resources must be [tagged with a cluster ID](#tagging-amazon-resources). +To enable the AWS cloud provider, there are no RKE configuration options. You only need to set the name as `aws`. In order to use the AWS cloud provider, all cluster nodes must have already been configured with an [appropriate IAM role](#iam-requirements) and your AWS resources must be [tagged with a cluster ID](#tagging-amazon-resources).
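Since the AWS provider takes no additional RKE options, a sketch of the entire cloud provider block reduces to the name alone:

```yaml
# Minimal AWS cloud provider configuration; no other options are needed in cluster.yml.
cloud_provider:
  name: aws
```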
```yaml cloud_provider: diff --git a/content/rke/latest/en/config-options/nodes/_index.md b/content/rke/latest/en/config-options/nodes/_index.md index 79a8314ad87..75321c4c6b9 100644 --- a/content/rke/latest/en/config-options/nodes/_index.md +++ b/content/rke/latest/en/config-options/nodes/_index.md @@ -5,9 +5,33 @@ weight: 210 The `nodes` directive is the only required section in the `cluster.yml` file. It's used by RKE to specify cluster node(s), ssh credentials used to access the node(s) and which roles these nodes will be in the Kubernetes cluster. +This section covers the following topics: + +- [Node configuration example](#node-configuration-example) +- [Kubernetes roles](#kubernetes-roles) + - [etcd](#etcd) + - [Controlplane](#controlplane) + - [Worker](#worker) +- [Node options](#node-options) + - [Address](#address) + - [Internal address](#internal-address) + - [Overriding the hostname](#overriding-the-hostname) + - [SSH port](#ssh-port) + - [SSH users](#ssh-users) + - [SSH key path](#ssh-key-path) + - [SSH key](#ssh-key) + - [SSH certificate path](#ssh-certificate-path) + - [SSH certificate](#ssh-certificate) + - [Docker socket](#docker-socket) + - [Labels](#labels) + - [Taints](#taints) + +# Node Configuration Example + +The following example shows node configuration in an example `cluster.yml`: + ```yaml nodes: - nodes: - address: 1.1.1.1 user: ubuntu role: @@ -50,7 +74,33 @@ nodes: app: ingress ``` -## Node Options +# Kubernetes Roles + +You can specify the list of roles that you want the node to be as part of the Kubernetes cluster. Three roles are supported: `controlplane`, `etcd` and `worker`. Node roles are not mutually exclusive. It's possible to assign any combination of roles to any node. It's also possible to change a node's role using the upgrade process. + +> **Note:** Prior to v0.1.8, workloads/pods might have run on any nodes with `worker` or `controlplane` roles, but as of v0.1.8, they will only be deployed to any `worker` nodes. + +### etcd + +With this role, the `etcd` container will be run on these nodes. Etcd keeps the state of your cluster and is the most important component of your cluster, serving as its single source of truth. Although you can run etcd on just one node, it typically takes 3, 5 or more nodes to create an HA configuration. Etcd is a distributed, reliable key-value store which stores all Kubernetes state. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **etcd** role is shown below: + +Taint Key | Taint Value | Taint Effect +---------------------------------------|--------------|-------------- +`node-role.kubernetes.io/etcd` | `true` | `NoExecute` + +### Controlplane + +With this role, the stateless components that are used to deploy Kubernetes will run on these nodes. These components are used to run the API server, scheduler, and controllers. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **controlplane** role is shown below: + +Taint Key | Taint Value | Taint Effect +---------------------------------------|--------------|-------------- +`node-role.kubernetes.io/controlplane` | `true` | `NoSchedule` + +### Worker + +With this role, any workloads or pods that are deployed will land on these nodes. + +# Node Options Within each node, there are multiple directives that can be used. @@ -94,32 +144,6 @@ For each node, you can specify the path, i.e.
`ssh_cert_path`, for the signed SS Instead of setting the path to the signed SSH certificate, you can alternatively specify the actual certificate, i.e. `ssh_cert`, to be used to connect to the node. -### Kubernetes Roles - -You can specify the list of roles that you want the node to be as part of the Kubernetes cluster. Three roles are supported: `controlplane`, `etcd` and `worker`. Node roles are not mutually exclusive. It's possible to assign any combination of roles to any node. It's also possible to change a node's role using the upgrade process. - -> **Note:** Prior to v0.1.8, workloads/pods might have run on any nodes with `worker` or `controlplane` roles, but as of v0.1.8, they will only be deployed to any `worker` nodes. - -* **etcd** - -With this role, the `etcd` container will be run on these nodes. Etcd keeps the state of your cluster and is the most important component in your cluster, single source of truth of your cluster. Although you can run etcd on just one node, it typically takes 3, 5 or more nodes to create an HA configuration. Etcd is a distributed reliable key-value store which stores all Kubernetes state. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **etcd** role is shown below: - -Taint Key | Taint Value | Taint Effect ----------------------------------------|--------------|-------------- -`node-role.kubernetes.io/etcd` | `true` | `NoExecute` - -* **controlplane** - -With this role, the stateless components that are used to deploy Kubernetes will run on these nodes. These components are used to run the API server, scheduler, and controllers. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **controlplane** role is shown below: - -Taint Key | Taint Value | Taint Effect ----------------------------------------|--------------|-------------- -`node-role.kubernetes.io/controlplane` | `true` | `NoSchedule` - -* **worker** - -With this role, any workloads or pods that are deployed will land on these nodes. - ### Docker Socket If the Docker socket is different than the default, you can set the `docker_socket`. The default is `/var/run/docker.sock` diff --git a/content/rke/latest/en/config-options/private-registries/_index.md b/content/rke/latest/en/config-options/private-registries/_index.md index b835a3480a2..5a5c1a4d18e 100644 --- a/content/rke/latest/en/config-options/private-registries/_index.md +++ b/content/rke/latest/en/config-options/private-registries/_index.md @@ -3,7 +3,7 @@ title: Private Registries weight: 215 --- -RKE supports the ability to configure multiple private Docker registries. By passing in your registry and credentials, it allows the nodes to pull images from these private registries. +RKE supports the ability to configure multiple private Docker registries in the `cluster.yml`. By passing in your registry and credentials, it allows the nodes to pull images from these private registries. 
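For illustration, a hypothetical `private_registries` entry with a placeholder registry address and credentials might look like the following (the `url`, `user`, and `password` keys are assumed field names for this sketch):

```yaml
# Sketch only; replace the placeholder values with your registry details.
private_registries:
  - url: registry.example.com   # placeholder private registry address
    user: Username              # placeholder credentials
    password: password
```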
```yaml private_registries: diff --git a/content/rke/latest/en/config-options/services/services-extras/_index.md b/content/rke/latest/en/config-options/services/services-extras/_index.md index 867d6a98fa2..57f623800ab 100644 --- a/content/rke/latest/en/config-options/services/services-extras/_index.md +++ b/content/rke/latest/en/config-options/services/services-extras/_index.md @@ -9,10 +9,16 @@ RKE supports additional service arguments, volume binds and environment variable For any of the Kubernetes services, you can update the `extra_args` to change the existing defaults. -As of `v0.1.3`, using `extra_args` will add new arguments and **override** any existing defaults. For example, if you need to modify the default admission controllers list, you need to include the default list and edit it with your changes so all changes are included. +As of `v0.1.3`, using `extra_args` will add new arguments and **override** any existing defaults. For example, if you need to modify the default admission plugins list, you need to include the default list and edit it with your changes so all changes are included. Prior to `v0.1.3`, using `extra_args` would only add new arguments to the list and there was no ability to change the default list. +All service defaults and parameters are defined per [`kubernetes_version`]({{< baseurl >}}/rke/latest/en/config-options/#kubernetes-version): + +- For RKE v0.3.0+, the service defaults are located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go). The default list of admission plugins is the same for all Kubernetes versions and is located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go#L11). + +- For RKE prior to v0.3.0, the service defaults and admission plugins are located [here](https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go). + ```yaml services: kube-controller: diff --git a/content/rke/latest/en/config-options/system-images/_index.md b/content/rke/latest/en/config-options/system-images/_index.md index 287551e7c26..ae16387c7cc 100644 --- a/content/rke/latest/en/config-options/system-images/_index.md +++ b/content/rke/latest/en/config-options/system-images/_index.md @@ -6,7 +6,7 @@ When RKE is deploying Kubernetes, there are several images that are pulled. Thes As of `v0.1.6`, the functionality of a couple of the system images were consolidated into a single `rancher/rke-tools` image to simplify and speed the deployment process. -You can configure the [network plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/), [ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [dns provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) as well as the options for these add-ons separately. +You can configure the [network plug-ins]({{< baseurl >}}/rke/latest/en/config-options/add-ons/network-plugins/), [ingress controller]({{< baseurl >}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [dns provider]({{< baseurl >}}/rke/latest/en/config-options/add-ons/dns/) as well as the options for these add-ons separately in the `cluster.yml`. Below is an example of the list of system images used to deploy Kubernetes through RKE.
The default versions of Kubernetes are tied to specific versions of system images. diff --git a/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md index 624f2478dd0..a5435ddfbe1 100644 --- a/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md +++ b/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md @@ -38,14 +38,13 @@ time="2018-05-04T18:43:16Z" level=info msg="Created backup" name="2018-05-04T18: |**secret_key** |S3 secret key with permission to access the backup bucket.| * | |**region** |S3 region for the backup bucket. This is optional.| * | |**endpoint** |S3 regions endpoint for the backup bucket.| * | -| **endpoint-ca** | Custom CA certificate to connect to custom S3 endpoint. Provided as a multi-line string. _Available as of v0.2.5_ | *| |**custom_ca** |Custom certificate authority to use when connecting to the endpoint. Only required for private S3 compatible storage solutions. Available for RKE v0.2.5+.| * | The `--access-key` and `--secret-key` options are not required if the `etcd` nodes are AWS EC2 instances that have been configured with a suitable IAM instance profile. ##### Using a custom CA certificate for S3 -The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the option `endpoint-ca` to connect to the S3 backend. +The backup snapshot can be stored on a custom `S3` backend like [minio](https://min.io/). If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the option `custom_ca` to connect to the S3 backend. ### IAM Support for Storing Snapshots in S3 @@ -53,7 +52,7 @@ In addition to API access keys, RKE supports using IAM roles for S3 authenticati Below is an [example IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) that would allow nodes to store and retrieve backups from S3: -``` +```json { "Version": "2012-10-17", "Statement": [ diff --git a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md index c6dae0f85fc..a4e0ce38419 100644 --- a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md +++ b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md @@ -8,7 +8,9 @@ The details of restoring your cluster from backup are different depending on you {{% tabs %}} {{% tab "RKE v0.2.0+"%}} -If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts the etcd to a specific snapshot. The following actions are included in the command: +If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot and should be run on an etcd node of the specific cluster that has suffered the disaster. + +The following actions will be performed when you run the command: - Syncs the snapshot or downloads the snapshot from S3, if necessary. - Checks snapshot checksum across etcd nodes to make sure they are identical. @@ -71,9 +73,9 @@ $ rke etcd snapshot-restore \ {{% /tab %}} {{% tab "RKE prior to v0.2.0"%}} -If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd.
This command reverts etcd to a specific snapshot. +If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot and should be run on an etcd node of the specific cluster that has suffered the disaster. -The following actions are included in `rke etcd snapshot-restore`: +The following actions will be performed when you run the command: - Removes the old etcd cluster - Rebuilds the etcd cluster using the local snapshot diff --git a/content/rke/latest/en/os/_index.md b/content/rke/latest/en/os/_index.md index f76b057e82e..92d21f5e5b5 100644 --- a/content/rke/latest/en/os/_index.md +++ b/content/rke/latest/en/os/_index.md @@ -38,6 +38,8 @@ RKE runs on almost any Linux OS with Docker installed. Most of the development a ``` usermod -aG docker <user_name> ``` + +> **Note:** Users added to the `docker` group are granted effective root permissions on the host by means of the Docker API. Only choose a user that is intended for this purpose and has its credentials and access properly secured. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to see how you can configure access to Docker without using the `root` user. @@ -164,13 +166,21 @@ By default, Atomic hosts do not come with a Docker group. You can update the own ## Software -- Docker - Each Kubernetes version supports different Docker versions. +This section describes the requirements for Docker, Kubernetes, and SSH. -Kubernetes Version | Supported Docker version(s) | ----|----| -v1.13.x | RHEL Docker 1.13, 17.03.2, 18.06.2, 18.09.2 | -v1.12.x | RHEL Docker 1.13, 17.03.2, 18.06.2, 18.09.2 | -v1.11.x | RHEL Docker 1.13, 17.03.2, 18.06.2, 18.09.2 | +### OpenSSH + +In order to SSH into each node, OpenSSH 7.0+ must be installed on each node. + +### Kubernetes + +Refer to the [RKE release notes](https://github.com/rancher/rke/releases) for the supported versions of Kubernetes. + +### Docker + +Each Kubernetes version supports different Docker versions. The Kubernetes release notes contain the [current list](https://kubernetes.io/docs/setup/release/notes/#dependencies) of validated Docker versions. + +### Installing Docker You can either follow the [Docker installation](https://docs.docker.com/install/) instructions or use one of Rancher's [install scripts](https://github.com/rancher/install-docker) to install Docker. For RHEL, please see [How to install Docker on Red Hat Enterprise Linux 7](https://access.redhat.com/solutions/3727511). @@ -180,6 +190,8 @@ Docker Version | Install Script | 18.06.2 | curl https://releases.rancher.com/install-docker/18.06.2.sh | sh | 17.03.2 | curl https://releases.rancher.com/install-docker/17.03.2.sh | sh | +### Checking the Installed Docker Version + Confirm that a Kubernetes supported version of Docker is installed on your machine, by running `docker version --format '{{.Server.Version}}'`. ``` @@ -187,10 +199,7 @@ docker version --format '{{.Server.Version}}' 17.03.2-ce ``` -- OpenSSH 7.0+ - In order to SSH into each node, OpenSSH must be installed on each node.
- ## Ports - {{< ports-rke-nodes >}} {{< requirements_ports_rke >}} diff --git a/layouts/shortcodes/img.html b/layouts/shortcodes/img.html new file mode 100644 index 00000000000..174d4b38e2b --- /dev/null +++ b/layouts/shortcodes/img.html @@ -0,0 +1,17 @@ +{{ $img := .Get 0 }} +{{ $alt := .Get 1 }} +{{ with resources.Get $img }} + {{ $thumb20 := .Resize "2000x" }} + {{ $thumb16 := .Resize "1600x" }} + {{ $thumb12 := .Resize "1200x" }} + {{ $thumb10 := .Resize "1000x" }} + {{ $thumb8 := .Resize "800x" }} + {{ $thumb6 := .Resize "600x" }} + {{ $thumb4 := .Resize "400x" }} + {{ $thumb2 := .Resize "200x" }} + {{$alt}} +{{ end }} diff --git a/layouts/shortcodes/requirements_ports_rancher.html b/layouts/shortcodes/requirements_ports_rancher.html deleted file mode 100644 index 62d2297d16e..00000000000 --- a/layouts/shortcodes/requirements_ports_rancher.html +++ /dev/null @@ -1,59 +0,0 @@ -
[Deleted shortcode content: two HTML tables listing port requirements for Rancher nodes (nodes running the rancher/rancher container).]

Rancher nodes - Inbound rules:

| Protocol | Port | Source | Description |
|----------|------|--------|-------------|
| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used |
| TCP | 443 | etcd nodes, controlplane nodes, worker nodes, Hosted/Imported Kubernetes, any that needs to be able to use UI/API | Rancher agent, Rancher UI/API, kubectl |

Rancher nodes - Outbound rules:

| Protocol | Port | Destination | Description |
|----------|------|-------------|-------------|
| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver |
| TCP | 443 | 35.160.43.145/32, 35.167.242.46/32, 52.33.59.17/32 | git.rancher.io (catalogs) |
| TCP | 2376 | Any node IP from a node created using Node Driver | Docker daemon TLS port used by Docker Machine |
| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes apiserver |
diff --git a/static/img/k3s/k3s-production-setup.svg b/static/img/k3s/k3s-production-setup.svg new file mode 100644 index 00000000000..2d132eb9566 --- /dev/null +++ b/static/img/k3s/k3s-production-setup.svg @@ -0,0 +1,1176 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/img/rancher/add-custom-metrics.gif b/static/img/rancher/add-custom-metrics.gif new file mode 100644 index 00000000000..9c6405a3433 Binary files /dev/null and b/static/img/rancher/add-custom-metrics.gif differ diff --git a/static/img/rancher/add-ingress-form.png b/static/img/rancher/add-ingress-form.png new file mode 100644 index 00000000000..405ff3abf1e Binary files /dev/null and b/static/img/rancher/add-ingress-form.png differ diff --git a/static/img/rancher/add-ingress.gif b/static/img/rancher/add-ingress.gif new file mode 100644 index 00000000000..b9a3f449d5b Binary files /dev/null and b/static/img/rancher/add-ingress.gif differ diff --git a/static/img/rancher/add-node-label.gif b/static/img/rancher/add-node-label.gif new file mode 100644 index 00000000000..9c41e774064 Binary files /dev/null and b/static/img/rancher/add-node-label.gif differ diff --git a/static/img/rancher/add-pod-label.gif b/static/img/rancher/add-pod-label.gif new file mode 100644 index 00000000000..b78da3ce7cb Binary files /dev/null and b/static/img/rancher/add-pod-label.gif differ diff --git a/static/img/rancher/add-record.png b/static/img/rancher/add-record.png new file mode 100644 index 00000000000..8838a5ea6ff Binary files /dev/null and b/static/img/rancher/add-record.png differ diff --git a/static/img/rancher/auth-providers.svg b/static/img/rancher/auth-providers.svg new file mode 100644 index 00000000000..8b53323d25a --- /dev/null +++ b/static/img/rancher/auth-providers.svg @@ -0,0 +1,2 @@ + +
[SVG diagram text labels: Rancher Authentication Proxy; Authentication Providers]
\ No newline at end of file diff --git a/static/img/rancher/cattle-load-balancer.svg b/static/img/rancher/cattle-load-balancer.svg new file mode 100644 index 00000000000..70db25baa0b --- /dev/null +++ b/static/img/rancher/cattle-load-balancer.svg @@ -0,0 +1,2 @@ + +
[SVG diagram text labels: Cattle Environment; Hosts 1-4 each running haproxy with web and chat containers; Load Balancer 1 (web.com/login) resolves to Host 1 IP: 80 and Host 2 IP: 80; Load Balancer 2 (chat.com/login) resolves to Host 3 IP: 80 and Host 4 IP: 80]
\ No newline at end of file diff --git a/static/img/rancher/deploy-service.gif b/static/img/rancher/deploy-service.gif new file mode 100644 index 00000000000..bf97d1690e7 Binary files /dev/null and b/static/img/rancher/deploy-service.gif differ diff --git a/static/img/rancher/deploy-workload-hostport.png b/static/img/rancher/deploy-workload-hostport.png new file mode 100644 index 00000000000..ec6193df3c4 Binary files /dev/null and b/static/img/rancher/deploy-workload-hostport.png differ diff --git a/static/img/rancher/deploy-workload-load-balancer.png b/static/img/rancher/deploy-workload-load-balancer.png new file mode 100644 index 00000000000..4751b599a28 Binary files /dev/null and b/static/img/rancher/deploy-workload-load-balancer.png differ diff --git a/static/img/rancher/deploy-workload-nodeport.png b/static/img/rancher/deploy-workload-nodeport.png new file mode 100644 index 00000000000..d1cfa67e35b Binary files /dev/null and b/static/img/rancher/deploy-workload-nodeport.png differ diff --git a/static/img/rancher/edit-migration-workload.gif b/static/img/rancher/edit-migration-workload.gif new file mode 100644 index 00000000000..f9510b8ff9f Binary files /dev/null and b/static/img/rancher/edit-migration-workload.gif differ diff --git a/static/img/rancher/enable-cluster-monitoring.gif b/static/img/rancher/enable-cluster-monitoring.gif new file mode 100644 index 00000000000..baef3cc2487 Binary files /dev/null and b/static/img/rancher/enable-cluster-monitoring.gif differ diff --git a/static/img/rancher/enable-project-monitoring.gif b/static/img/rancher/enable-project-monitoring.gif new file mode 100644 index 00000000000..f44c67eb8f7 Binary files /dev/null and b/static/img/rancher/enable-project-monitoring.gif differ diff --git a/static/img/rancher/health-check-section.png b/static/img/rancher/health-check-section.png new file mode 100644 index 00000000000..4a4bfafe128 Binary files /dev/null and b/static/img/rancher/health-check-section.png differ diff --git a/static/img/rancher/healthcheck-cmd-exec.png b/static/img/rancher/healthcheck-cmd-exec.png new file mode 100644 index 00000000000..06b6b22ab6c Binary files /dev/null and b/static/img/rancher/healthcheck-cmd-exec.png differ diff --git a/static/img/rancher/healthcheck.svg b/static/img/rancher/healthcheck.svg new file mode 100644 index 00000000000..55b573e578f --- /dev/null +++ b/static/img/rancher/healthcheck.svg @@ -0,0 +1,2 @@ + +
[SVG diagram text labels: Rancher v1.6 Stack; Nodes running Nginx and the Healthcheck Microservice; 1. Healthcheck Microservice checks for open port (TCP) or makes a GET request (HTTP) across hosts to monitored container; 2. Monitored container responds to check with a response (success) or no response (failure)]
\ No newline at end of file diff --git a/static/img/rancher/import-yaml-error.png b/static/img/rancher/import-yaml-error.png new file mode 100644 index 00000000000..8af7a0878ce Binary files /dev/null and b/static/img/rancher/import-yaml-error.png differ diff --git a/static/img/rancher/imported-workloads.png b/static/img/rancher/imported-workloads.png new file mode 100644 index 00000000000..75142fd0510 Binary files /dev/null and b/static/img/rancher/imported-workloads.png differ diff --git a/static/img/rancher/kubernetes-load-balancer.svg b/static/img/rancher/kubernetes-load-balancer.svg new file mode 100644 index 00000000000..bf9de1a3986 --- /dev/null +++ b/static/img/rancher/kubernetes-load-balancer.svg @@ -0,0 +1,2 @@ + +
[SVG diagram text labels: Kubernetes Cluster; Nodes 1-4 each running an Ingress Controller with web and chat pods; Nginx Global Load Balancer; web.com/login and chat.com/login resolve to Node 1-4 IP: 80]
\ No newline at end of file diff --git a/static/img/rancher/layer-4-lb-config-map.png b/static/img/rancher/layer-4-lb-config-map.png new file mode 100644 index 00000000000..cf5c9dc168d Binary files /dev/null and b/static/img/rancher/layer-4-lb-config-map.png differ diff --git a/static/img/rancher/linked-service-workload.png b/static/img/rancher/linked-service-workload.png new file mode 100644 index 00000000000..e0a1da0981f Binary files /dev/null and b/static/img/rancher/linked-service-workload.png differ diff --git a/static/img/rancher/liveness-check.png b/static/img/rancher/liveness-check.png new file mode 100644 index 00000000000..e88cb297aae Binary files /dev/null and b/static/img/rancher/liveness-check.png differ diff --git a/static/img/rancher/load-balancer-links.png b/static/img/rancher/load-balancer-links.png new file mode 100644 index 00000000000..5121abd0795 Binary files /dev/null and b/static/img/rancher/load-balancer-links.png differ diff --git a/static/img/rancher/load-balancer-ssl-certs.png b/static/img/rancher/load-balancer-ssl-certs.png new file mode 100644 index 00000000000..246ffd618f8 Binary files /dev/null and b/static/img/rancher/load-balancer-ssl-certs.png differ diff --git a/static/img/rancher/migrate-schedule-workloads.png b/static/img/rancher/migrate-schedule-workloads.png new file mode 100644 index 00000000000..c6ab638ac94 Binary files /dev/null and b/static/img/rancher/migrate-schedule-workloads.png differ diff --git a/static/img/rancher/node-schedule-advanced-options.png b/static/img/rancher/node-schedule-advanced-options.png new file mode 100644 index 00000000000..1d83edc767e Binary files /dev/null and b/static/img/rancher/node-schedule-advanced-options.png differ diff --git a/static/img/rancher/node-schedule-antiaffinity.png b/static/img/rancher/node-schedule-antiaffinity.png new file mode 100644 index 00000000000..74bd0455b50 Binary files /dev/null and b/static/img/rancher/node-schedule-antiaffinity.png differ diff --git a/static/img/rancher/node-scheduling-affinity.png b/static/img/rancher/node-scheduling-affinity.png new file mode 100644 index 00000000000..28d44908232 Binary files /dev/null and b/static/img/rancher/node-scheduling-affinity.png differ diff --git a/static/img/rancher/node-scheduling-labels.png b/static/img/rancher/node-scheduling-labels.png new file mode 100644 index 00000000000..4e1a634e74b Binary files /dev/null and b/static/img/rancher/node-scheduling-labels.png differ diff --git a/static/img/rancher/node-scheduling.png b/static/img/rancher/node-scheduling.png new file mode 100644 index 00000000000..953208144c7 Binary files /dev/null and b/static/img/rancher/node-scheduling.png differ diff --git a/static/img/rancher/one-six-schedule.png b/static/img/rancher/one-six-schedule.png new file mode 100644 index 00000000000..5bc05d915f8 Binary files /dev/null and b/static/img/rancher/one-six-schedule.png differ diff --git a/static/img/rancher/output-dot-text.png b/static/img/rancher/output-dot-text.png new file mode 100644 index 00000000000..ca39b2867b3 Binary files /dev/null and b/static/img/rancher/output-dot-text.png differ diff --git a/static/img/rancher/probes.svg b/static/img/rancher/probes.svg new file mode 100644 index 00000000000..007abfda6c1 --- /dev/null +++ b/static/img/rancher/probes.svg @@ -0,0 +1,2 @@ + +
[SVG diagram text labels: Rancher v2.0 Kubernetes Cluster; Nodes running Nginx and kubelet; 1. On this node, the kubelet runs a liveness probe on a pod that's running; the pod either sends back a response (success) or doesn't (failure); 2. On this node, the kubelet runs a readiness probe on a pod that's in the process of restarting; the probe finds that the pod is busy, so Kubernetes does not send it any requests]
\ No newline at end of file diff --git a/static/img/rancher/readiness-check-http.png b/static/img/rancher/readiness-check-http.png new file mode 100644 index 00000000000..1b2b19c2a75 Binary files /dev/null and b/static/img/rancher/readiness-check-http.png differ diff --git a/static/img/rancher/readiness-check-tcp.png b/static/img/rancher/readiness-check-tcp.png new file mode 100644 index 00000000000..0ba9869eb7c Binary files /dev/null and b/static/img/rancher/readiness-check-tcp.png differ diff --git a/static/img/rancher/readiness-check.png b/static/img/rancher/readiness-check.png new file mode 100644 index 00000000000..f978079aff7 Binary files /dev/null and b/static/img/rancher/readiness-check.png differ diff --git a/static/img/rancher/resolve-affinity.png b/static/img/rancher/resolve-affinity.png new file mode 100644 index 00000000000..d705a2c4fd8 Binary files /dev/null and b/static/img/rancher/resolve-affinity.png differ diff --git a/static/img/rancher/resolve-global.png b/static/img/rancher/resolve-global.png new file mode 100644 index 00000000000..583c500b8f6 Binary files /dev/null and b/static/img/rancher/resolve-global.png differ diff --git a/static/img/rancher/resolve-health-checks.png b/static/img/rancher/resolve-health-checks.png new file mode 100644 index 00000000000..3b7bfe282d1 Binary files /dev/null and b/static/img/rancher/resolve-health-checks.png differ diff --git a/static/img/rancher/resolve-links.png b/static/img/rancher/resolve-links.png new file mode 100644 index 00000000000..1f0544268f2 Binary files /dev/null and b/static/img/rancher/resolve-links.png differ diff --git a/static/img/rancher/resolve-load-balancer.png b/static/img/rancher/resolve-load-balancer.png new file mode 100644 index 00000000000..a03951098cf Binary files /dev/null and b/static/img/rancher/resolve-load-balancer.png differ diff --git a/static/img/rancher/resolve-pull-image.png b/static/img/rancher/resolve-pull-image.png new file mode 100644 index 00000000000..a822469d795 Binary files /dev/null and b/static/img/rancher/resolve-pull-image.png differ diff --git a/static/img/rancher/resolve-scale.png b/static/img/rancher/resolve-scale.png new file mode 100644 index 00000000000..5d36dec666a Binary files /dev/null and b/static/img/rancher/resolve-scale.png differ diff --git a/static/img/rancher/resource-constraint-settings.png b/static/img/rancher/resource-constraint-settings.png new file mode 100644 index 00000000000..68bf73cfc5d Binary files /dev/null and b/static/img/rancher/resource-constraint-settings.png differ diff --git a/static/img/rancher/schedule-specific-node.png b/static/img/rancher/schedule-specific-node.png new file mode 100644 index 00000000000..211bd90a190 Binary files /dev/null and b/static/img/rancher/schedule-specific-node.png differ diff --git a/static/img/rancher/scheduled-nodes.png b/static/img/rancher/scheduled-nodes.png new file mode 100644 index 00000000000..14807de68f8 Binary files /dev/null and b/static/img/rancher/scheduled-nodes.png differ diff --git a/static/img/rancher/separate-check.png b/static/img/rancher/separate-check.png new file mode 100644 index 00000000000..d094073c02e Binary files /dev/null and b/static/img/rancher/separate-check.png differ diff --git a/static/img/rancher/view-edit-yaml.png b/static/img/rancher/view-edit-yaml.png new file mode 100644 index 00000000000..36574ffa618 Binary files /dev/null and b/static/img/rancher/view-edit-yaml.png differ diff --git a/static/img/rancher/workload-scale.png b/static/img/rancher/workload-scale.png new file mode 100644 
index 00000000000..f8aa87a6d5c Binary files /dev/null and b/static/img/rancher/workload-scale.png differ diff --git a/static/img/rancher/workload-type-option.png b/static/img/rancher/workload-type-option.png new file mode 100644 index 00000000000..02c74e29a6e Binary files /dev/null and b/static/img/rancher/workload-type-option.png differ diff --git a/static/img/rancher/workload-type.png b/static/img/rancher/workload-type.png new file mode 100644 index 00000000000..cfa3493381d Binary files /dev/null and b/static/img/rancher/workload-type.png differ