From 6e069e6f3f2c2fa49804c7d0b8c14f6627d55305 Mon Sep 17 00:00:00 2001 From: ROUL Date: Sun, 7 Oct 2018 21:18:19 +0200 Subject: [PATCH 01/30] Add cleaning information I turn around for hours before finding (https://github.com/rancher/rancher/issues/12237#issuecomment-376839985) - Hope it will help newby like me not wasting time :) --- .../v2.x/en/cluster-provisioning/rke-clusters/_index.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md index d5f24679f95..6401b575149 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md @@ -18,9 +18,10 @@ RKE launched clusters are separated into two categories: - [Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/): For use cases where you want to provision bare-metal servers, on-premise virtual machines, or bring virtual machines that are already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. - + >**Note:** If you have already deploy Kubernetes on a node, and for any reason you wish to reused this node in an another cluster dont forget to clean it by hand after removed it from the previous cluster `sudo rm -rf /var/lib/etcd /etc/kubernetes/ssl /etc/cni /opt/cni /var/lib/cni /var/run/calico /etc/kubernetes/.tmp/` +
### Requirements -If you use RKE to set up a cluster, your cluster nodes must meet our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). \ No newline at end of file +If you use RKE to set up a cluster, your cluster nodes must meet our [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). From 8449bfe3fc7d7811e5ac0babc0ae0c58bc8aa290 Mon Sep 17 00:00:00 2001 From: galal-hussein Date: Thu, 11 Oct 2018 23:28:04 +0200 Subject: [PATCH 02/30] Add iscsi notes on persistent volumes --- .../volumes-and-storage/_index.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md index bbce60fb424..20bafcb2929 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.x/en/concepts/volumes-and-storage/ - /rancher/v2.x/en/tasks/clusters/adding-storage/ --- -When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. +When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. There are two ways to create persistent storage in Kubernetes: Persistent Volumes (PVs) and Storage Classes. @@ -163,6 +163,21 @@ _Storage Classes_ allow you to dynamically provision persistent volumes on deman 1. Click `Save`. 
+## Notes About iSCSI Volumes + +iSCSI initiator tool is embedded into the kubelet docker image `rancher/hyperkube` so that it can be used to discover and initiate a session with iSCSI target, however sometimes kubelet fails to automatically login and connect with iSCSI volumes, the main problem is that initiator version may not match the same version as the target which will cause the failure, due to this incompatibility the user may workaround this problem by installing the initator tool on the kubernetes nodes manually, and then edit the kubelet configuration to mount the iscsi binary and configuration on the node: +``` +services: + kubelet: + extra_binds: + - "/etc/iscsi:/etc/iscsi" + - "/sbin/iscsiadm:/sbin/iscsiadm" +``` + +Note: + +If the open-iscsi (deb) or iscsi-initiator-utils (yum) package isn't installed before the bind mounts are made, docker kindly creates files/directories for you on the host and messes up the package install. + ## What's Next? Mount Persistent Volumes to workloads so that your applications can store their data. You can mount a either a manually created Persistent Volumes or a dynamically created Persistent Volume, which is created from a a Storage Class. 
From 204af903191cc852f2868fd1d98ad35cc47dfb64 Mon Sep 17 00:00:00 2001 From: ferhat elmas Date: Fri, 12 Oct 2018 07:08:55 +0200 Subject: [PATCH 03/30] fix some typos --- .../configuration/adding-kernel-parameters/_index.md | 2 +- .../os/v1.x/en/installation/storage/additional-mounts/_index.md | 2 +- .../v2.x/en/admin-settings/pod-security-policies/_index.md | 2 +- content/rancher/v2.x/en/contributing/_index.md | 2 +- .../single-node/single-node-install-external-lb/_index.md | 2 +- content/rancher/v2.x/en/k8s-in-rancher/_index.md | 2 +- .../v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md | 2 +- .../k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md | 2 +- content/rancher/v2.x/en/tools/notifiers-and-alerts/_index.md | 2 +- .../rancher/v2.x/en/tools/pipelines/configurations/_index.md | 2 +- .../v0.1.x/en/config-options/cloud-providers/vsphere/_index.md | 2 +- layouts/shortcodes/requirements_ports_rke.html | 2 +- scripts/build-algolia.js | 2 +- scripts/dev | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md b/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md index c3b935b21ba..96b28b68688 100644 --- a/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md +++ b/content/os/v1.x/en/installation/configuration/adding-kernel-parameters/_index.md @@ -37,7 +37,7 @@ $ sudo ros install -d /dev/sda --append "rancheros.autologin=tty1" _Available as of v1.1_ -RancherOS v1.1.0 added a Syslinux boot menu, which allows you to temporarily edit the boot paramters, or to select "Debug logging", "Autologin", both "Debug logging & Autologin" and "Recovery Console". +RancherOS v1.1.0 added a Syslinux boot menu, which allows you to temporarily edit the boot parameters, or to select "Debug logging", "Autologin", both "Debug logging & Autologin" and "Recovery Console". 
On desktop systems the Syslinux boot menu can be switched to graphical mode by adding `UI vesamenu.c32` to a new line in `global.cfg` (use `sudo ros config syslinux` to edit the file). diff --git a/content/os/v1.x/en/installation/storage/additional-mounts/_index.md b/content/os/v1.x/en/installation/storage/additional-mounts/_index.md index a5bade4f73f..e568596e3d1 100644 --- a/content/os/v1.x/en/installation/storage/additional-mounts/_index.md +++ b/content/os/v1.x/en/installation/storage/additional-mounts/_index.md @@ -11,7 +11,7 @@ mounts: - ["/dev/vdb", "/mnt/s", "ext4", ""] ``` -**Important**: Be aware, the 4th parameter is mandatory and cannot be ommited (server crashes). It also yet cannot be `defaults` +**Important**: Be aware, the 4th parameter is mandatory and cannot be omitted (server crashes). It also yet cannot be `defaults` As you will use the `ros` cli most probably, it would look like this: diff --git a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md index 802cb2a5604..51b190231ef 100644 --- a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md +++ b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md @@ -38,7 +38,7 @@ Rancher ships with two default Pod Security Policies (PSPs): the `restricted` an - `unrestricted` - This policy is equivilent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. + This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. 
 ## Creating Pod Security Policies
 
diff --git a/content/rancher/v2.x/en/contributing/_index.md b/content/rancher/v2.x/en/contributing/_index.md
index 3cb0d0833c2..40552c68691 100644
--- a/content/rancher/v2.x/en/contributing/_index.md
+++ b/content/rancher/v2.x/en/contributing/_index.md
@@ -79,7 +79,7 @@ Please follow this checklist when filing an issue which will helps us investigat
 - Docker daemon logging (these might not all exist, depending on operating system)
   - `/var/log/docker.log`
 
-If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determing what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage.
+If you are experiencing performance issues, please provide as much data (files or screenshots) of metrics as you can, which can help determine what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage.
 
 ### Docs
 
diff --git a/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md b/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md
index e24cefd9cd5..109dc7782d7 100644
--- a/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md
+++ b/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md
@@ -159,7 +159,7 @@ server {
 
 ### API Auditing
 
-If you want to record all transations with the Rancher API, enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command.
+If you want to record all transactions with the Rancher API, enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command.
-e AUDIT_LEVEL=1 \ -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ diff --git a/content/rancher/v2.x/en/k8s-in-rancher/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/_index.md index 7858a9b4a3b..6a7b31e6dbd 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/_index.md @@ -111,7 +111,7 @@ For more information, see [Ingress]({{< baseurl >}}/rancher/v2.x/en/k8s-in-ranch ## Service Discovery -After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labled pods to a specific hostname. +After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. For more information, see [Service Discovery]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/service-discovery). diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md index f3c06af67a8..8926a33aeb5 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md @@ -857,7 +857,7 @@ Use your load testing tool to scale down to one pod when all metrics below targe ### Conclusion -Horizontal Pod Autoscaling is a great way to automate the number of pod you have deployed for maximum efficency. You can use it to accomodate deployment scale to real service load and to meet service level agreements. 
+Horizontal Pod Autoscaling is a great way to automate the number of pod you have deployed for maximum efficiency. You can use it to accommodate deployment scale to real service load and to meet service level agreements. By adjusting the `horizontal-pod-autoscaler-downscale-delay` and `horizontal-pod-autoscaler-upscale-delay` flag values, you can adjust the time needed before kube-controller scales your pods up or down. diff --git a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md index 8caf834db7e..b7f03267f10 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md @@ -62,7 +62,7 @@ Ingress can be added for workloads to provide load balancing, SSL termination an 1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. -1. If any of your ingress rules handle requests for encrypted ports, add a certificate to encrpyt/decrypt communications. +1. If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. >**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{< baseurl >}}/rancher/v2.x/en/k8s-in-rancher/certificates/). 
diff --git a/content/rancher/v2.x/en/tools/notifiers-and-alerts/_index.md b/content/rancher/v2.x/en/tools/notifiers-and-alerts/_index.md index deec63e0391..6734957bb7f 100644 --- a/content/rancher/v2.x/en/tools/notifiers-and-alerts/_index.md +++ b/content/rancher/v2.x/en/tools/notifiers-and-alerts/_index.md @@ -62,7 +62,7 @@ Set up a notifier so that you can begin configuring and sending alerts. 1. Enter a **Name** for the notifier. 1. Using the app of your choice, create a webhook URL. 1. Enter your webhook **URL**. -1. Click **Test**. If the test is successfull, the URL you're configuring as a notifier outputs `Webhook setting validated`. +1. Click **Test**. If the test is successful, the URL you're configuring as a notifier outputs `Webhook setting validated`. {{% /accordion %}} 1. Click **Add** to complete adding the notifier. diff --git a/content/rancher/v2.x/en/tools/pipelines/configurations/_index.md b/content/rancher/v2.x/en/tools/pipelines/configurations/_index.md index 038537994dc..a4f44a8ac40 100644 --- a/content/rancher/v2.x/en/tools/pipelines/configurations/_index.md +++ b/content/rancher/v2.x/en/tools/pipelines/configurations/_index.md @@ -108,7 +108,7 @@ The first stage is preserved to be a cloning step that checks out source code fr {{% /accordion %}} {{% accordion id="run-script" label="Run Script" %}} -The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience you can use variables to refer to metadata of a pipeline execution. Please go to [reference page](/rancher/v2.x/en/tools/pipelines/reference/#variable-substitution) for the list of available vairables. +The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. 
For your convenience you can use variables to refer to metadata of a pipeline execution. Please go to [reference page](/rancher/v2.x/en/tools/pipelines/reference/#variable-substitution) for the list of available variables. {{% tabs %}} diff --git a/content/rke/v0.1.x/en/config-options/cloud-providers/vsphere/_index.md b/content/rke/v0.1.x/en/config-options/cloud-providers/vsphere/_index.md index 80169afd0b7..fdfdddcac7c 100644 --- a/content/rke/v0.1.x/en/config-options/cloud-providers/vsphere/_index.md +++ b/content/rke/v0.1.x/en/config-options/cloud-providers/vsphere/_index.md @@ -58,7 +58,7 @@ The vSphere configuration options are divided into 5 groups: ### global -The main purpose of global options is to be able to define a common set of configuration parameters that will be inherited by all vCenters defined under the `virtual_center` directive unless explicitely defined there. +The main purpose of global options is to be able to define a common set of configuration parameters that will be inherited by all vCenters defined under the `virtual_center` directive unless explicitly defined there. Accordingly, the `global` directive accepts the same configuration options that are available under the `virtual_center` directive. Additionally it accepts a single parameter that can only be specified here: diff --git a/layouts/shortcodes/requirements_ports_rke.html b/layouts/shortcodes/requirements_ports_rke.html index 01b76d194fc..c972bed77d2 100644 --- a/layouts/shortcodes/requirements_ports_rke.html +++ b/layouts/shortcodes/requirements_ports_rke.html @@ -304,6 +304,6 @@

Information on local node traffic

-

Kubernetes healthchecks (livenessProbe and readinessProbe) are executed on the host itself. On most nodes, this is allowed by default. When you have applied strict host firewall (i.e. iptables) policies on the node, or when you are using nodes that have multiple interfaces (multihomed), this traffic gets blocked. In this case, you have to explicitely allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as Source or Destination in your security group, that this only applies to the private interface of the nodes/instances. +

Kubernetes healthchecks (livenessProbe and readinessProbe) are executed on the host itself. On most nodes, this is allowed by default. When you have applied strict host firewall (i.e. iptables) policies on the node, or when you are using nodes that have multiple interfaces (multihomed), this traffic gets blocked. In this case, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as Source or Destination in your security group, that this only applies to the private interface of the nodes/instances.

diff --git a/scripts/build-algolia.js b/scripts/build-algolia.js index ebbf2fb4b5f..efc80f581d3 100755 --- a/scripts/build-algolia.js +++ b/scripts/build-algolia.js @@ -79,7 +79,7 @@ nodes.forEach(node => { } - // remove potentially large content (see size limits) and replace with teh summary so that we don't get results with zero highlightable results + // remove potentially large content (see size limits) and replace with the summary so that we don't get results with zero highlightable results node.content = node.summary; // remove summary for dedup diff --git a/scripts/dev b/scripts/dev index 3ede772eac6..b75c0331226 100755 --- a/scripts/dev +++ b/scripts/dev @@ -63,7 +63,7 @@ while getopts ":bdp:t:u" opt;do UPLOAD="true" ;; \?) - echoerr "Invalid arguemnts" + echoerr "Invalid arguments" print_help exit 1 ;; From 4120f00f7e34288516da46854084ce9cb142a346 Mon Sep 17 00:00:00 2001 From: niusmallnan Date: Fri, 12 Oct 2018 15:07:50 +0800 Subject: [PATCH 04/30] Only support built-in services for writing files in specific system services --- .../os/v1.x/en/installation/configuration/write-files/_index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/content/os/v1.x/en/installation/configuration/write-files/_index.md b/content/os/v1.x/en/installation/configuration/write-files/_index.md index bc86e0a21ab..3853e2241df 100644 --- a/content/os/v1.x/en/installation/configuration/write-files/_index.md +++ b/content/os/v1.x/en/installation/configuration/write-files/_index.md @@ -39,3 +39,5 @@ write_files: restrict 127.0.0.1 restrict [::1] ``` + +> Note: Currently only supports built-in services, the custom services are not applicable. 
From e78a48716f008ffb1b967cfc613b790f998017d4 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Wed, 10 Oct 2018 23:03:23 +0200 Subject: [PATCH 05/30] Add note to cacerts.pem filename when adding secret --- .../en/installation/ha/helm-rancher/tls-secrets/_index.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md index fa2e65fe889..796305b067e 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/_index.md @@ -11,8 +11,8 @@ Use `kubectl` with the `tls` secret type to create the secrets. ``` kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=./tls.crt \ - --key=./tls.key + --cert=tls.crt \ + --key=tls.key ``` ### Private CA Signed - Additional Steps @@ -21,6 +21,8 @@ If you are using a private CA, Rancher will need to have a copy of the CA cert t Copy the CA cert into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. +>**Important:** Make sure the file is called `cacerts.pem` as Rancher uses that filename to configure the CA cert. 
+ ``` kubectl -n cattle-system create secret generic tls-ca \ --from-file=cacerts.pem From fea801982a9e8cccfdc05f1a2161a4b01c8acfa2 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Fri, 12 Oct 2018 16:00:47 +0200 Subject: [PATCH 06/30] Add note on removing addons section when migrating from RKE --- .../migrating-from-rke-add-on/_index.md | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md index 0041f925b23..b3562e1a750 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md @@ -48,6 +48,40 @@ kubectl -n cattle-system delete clusterrolebinding cattle-crb kubectl -n cattle-system delete serviceaccount cattle-admin ``` +### Remove addons section from `rancher-cluster.yml` + +The addons section from `rancher-cluster.yml` contains all the resources needed to deploy Rancher using RKE. By switching to Helm, this part of the cluster configuration file is no longer needed. Open `rancher-cluster.yml` in your favorite text editor and remove the addons section: + +>**Important:** Make sure you only remove the addons section from the cluster configuration file. + +``` +nodes: + - address: # hostname or IP to access nodes + user: # root user (usually 'root') + role: [controlplane,etcd,worker] # K8s roles for node + ssh_key_path: # path to PEM file + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +# Remove addons section from here til end of file +addons: |- + --- + ... +# End of file +``` + ### Follow Helm and Rancher install steps From here follow the standard install steps. 
From a600d24c2d7b69a1906ae7d6308dc3c7d1dc0a4a Mon Sep 17 00:00:00 2001 From: Denise Date: Fri, 12 Oct 2018 10:58:20 -0700 Subject: [PATCH 07/30] Update _index.md --- .../os/v1.x/en/installation/configuration/write-files/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/os/v1.x/en/installation/configuration/write-files/_index.md b/content/os/v1.x/en/installation/configuration/write-files/_index.md index 3853e2241df..c222448370c 100644 --- a/content/os/v1.x/en/installation/configuration/write-files/_index.md +++ b/content/os/v1.x/en/installation/configuration/write-files/_index.md @@ -40,4 +40,4 @@ write_files: restrict [::1] ``` -> Note: Currently only supports built-in services, the custom services are not applicable. +> **Note:** Currently, writing files to a specific system service is only supported for RancherOS's built-in services. You are unable to write files to any custom system services. From efaa771460475dc1df13c570f1fffff8cb7c92eb Mon Sep 17 00:00:00 2001 From: Denise Date: Thu, 11 Oct 2018 20:51:46 -0700 Subject: [PATCH 08/30] Update _index.md --- .../volumes-and-storage/_index.md | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md index 20bafcb2929..fd0a3644f5d 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/_index.md @@ -163,9 +163,28 @@ _Storage Classes_ allow you to dynamically provision persistent volumes on deman 1. Click `Save`. 
-## Notes About iSCSI Volumes +## iSCSI Volumes With Rancher Launched Kubernetes Clusters + +In [Rancher Launched Kubernetes clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. + +Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. + + +If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: + +| Platform | Package Name | Install Command | +| ------------- | ----------------------- | -------------------------------------- | +| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | +| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | + + +
+After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. -iSCSI initiator tool is embedded into the kubelet docker image `rancher/hyperkube` so that it can be used to discover and initiate a session with iSCSI target, however sometimes kubelet fails to automatically login and connect with iSCSI volumes, the main problem is that initiator version may not match the same version as the target which will cause the failure, due to this incompatibility the user may workaround this problem by installing the initator tool on the kubernetes nodes manually, and then edit the kubelet configuration to mount the iscsi binary and configuration on the node: ``` services: kubelet: @@ -174,9 +193,6 @@ services: - "/sbin/iscsiadm:/sbin/iscsiadm" ``` -Note: - -If the open-iscsi (deb) or iscsi-initiator-utils (yum) package isn't installed before the bind mounts are made, docker kindly creates files/directories for you on the host and messes up the package install. ## What's Next? 
From 06925962fdb964c132a4affa426c266db346470c Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Fri, 12 Oct 2018 16:54:07 -0700 Subject: [PATCH 09/30] adding note that points to topic on how to clean it --- .../v2.x/en/cluster-provisioning/rke-clusters/_index.md | 3 ++- .../rke-clusters/custom-nodes/_index.md | 5 +++-- .../rke-clusters/node-pools/azure/_index.md | 2 ++ .../rke-clusters/node-pools/digital-ocean/_index.md | 2 ++ .../rke-clusters/node-pools/ec2/_index.md | 1 + .../rke-clusters/node-pools/vsphere/_index.md | 7 ++++++- 6 files changed, 16 insertions(+), 4 deletions(-) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md index 6401b575149..14437de9abd 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md @@ -18,7 +18,8 @@ RKE launched clusters are separated into two categories: - [Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/): For use cases where you want to provision bare-metal servers, on-premise virtual machines, or bring virtual machines that are already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. - >**Note:** If you have already deploy Kubernetes on a node, and for any reason you wish to reused this node in an another cluster dont forget to clean it by hand after removed it from the previous cluster `sudo rm -rf /var/lib/etcd /etc/kubernetes/ssl /etc/cni /opt/cni /var/lib/cni /var/run/calico /etc/kubernetes/.tmp/` + +>**Note:** If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail.
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md index 6f3bc901216..25340cb68d6 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md @@ -34,9 +34,10 @@ Begin creation of a custom cluster by provisioning a Linux host. Your host can b - An on-premise VM - A bare-metal server ->**Bare-Metal Server Note:** +>**Notes:** > -While creating your cluster, you must assign Kubernetes roles to your cluster nodes. If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). +>- While creating your cluster, you must assign Kubernetes roles to your cluster nodes. If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). +>- If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. Provision the host according to the requirements below. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md index dfece5159ba..aeaa0b7505a 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md @@ -8,6 +8,8 @@ aliases: Use {{< product >}} to create a Kubernetes cluster in Azure. 
+>**Note:** If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + 1. From the **Clusters** page, click **Add Cluster**. 2. Choose **Azure**. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md index 479683c182c..6a16f6eb61d 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md @@ -7,6 +7,8 @@ aliases: --- Use {{< product >}} to create a Kubernetes cluster using DigitalOcean. +>**Note:** If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + 1. From the **Clusters** page, click **Add Cluster**. 2. Choose **DigitalOcean**. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md index 569fd285f92..2c7172c096c 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md @@ -14,6 +14,7 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. 
- [Example IAM Policy](#example-iam-policy) - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - IAM Policy added as Permission to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. +- If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. ## Create the cluster diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md index 2bef973caf4..d1ea9a08159 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md @@ -16,7 +16,12 @@ When creating a vSphere cluster, Rancher first provisions the specified amount o ## Prerequisites -Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. If you are planning to make use of vSphere volumes for persistent storage in the cluster, there are [additional requirements]({{< baseurl >}}/rke/v0.1.x/en/config-options/cloud-providers/vsphere/) that must be met. +- Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. 
If you are planning to make use of vSphere volumes for persistent storage in the cluster, there are [additional requirements]({{< baseurl >}}/rke/v0.1.x/en/config-options/cloud-providers/vsphere/) that must be met. + +- If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +## Provisioning a vSphere Cluster + The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: 1. From the **vSphere** console, go to the **Administration** page. From 6fd8056301bf7d046f32f2a7a16ab143f7ae4316 Mon Sep 17 00:00:00 2001 From: Denise Date: Sun, 14 Oct 2018 19:24:41 -0700 Subject: [PATCH 10/30] Update _index.md --- .../single-node/single-node-install-external-lb/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md b/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md index 109dc7782d7..45e501aeaa4 100644 --- a/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md +++ b/content/rancher/v2.x/en/installation/single-node/single-node-install-external-lb/_index.md @@ -159,7 +159,7 @@ server { ### API Auditing -If you want to record all transitions with the Rancher API, enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. +If you want to record all transactions with the Rancher API, enable the [API Auditing]({{< baseurl >}}/rancher/v2.x/en/installation/api-auditing) feature by adding the flags below into your install command. 
-e AUDIT_LEVEL=1 \ -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ From 7eddc284ffb07300ebcc770a30a73efecfd83fed Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Mon, 15 Oct 2018 22:59:25 +0200 Subject: [PATCH 11/30] External TLS set typo --- .../en/installation/ha/helm-rancher/chart-options/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md index d94356b3115..c7a12773f3f 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md @@ -84,7 +84,7 @@ See [Installing Rancher - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/ We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. +You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. 
If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. > **Note:** If you are using a Private CA signed cert, add `--set privateCA=true` and see [Adding TLS Secrets - Private CA Signed - Additional Steps]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/#private-ca-signed---additional-steps) to add the CA cert for Rancher. From 7cb793722ad486ff8bda9c5c666cd8ed27509a90 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Fri, 12 Oct 2018 20:40:44 +0200 Subject: [PATCH 12/30] Enable etcd snapshots by default --- .../v2.x/en/installation/ha/kubernetes-rke/_index.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md b/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md index 6ff30b06aa3..b364c8c75d9 100644 --- a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md +++ b/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md @@ -28,6 +28,12 @@ nodes: internal_address: 172.16.42.73 user: ubuntu role: [controlplane,worker,etcd] + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h ``` #### Common RKE Nodes: Options From ba813c87fbb5585fc1f7fd33c063556c17d21394 Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Mon, 15 Oct 2018 17:30:40 -0700 Subject: [PATCH 13/30] removing cleaning note from nodepool sections --- .../v2.x/en/cluster-provisioning/rke-clusters/_index.md | 2 +- .../cluster-provisioning/rke-clusters/custom-nodes/_index.md | 2 +- .../rke-clusters/node-pools/azure/_index.md | 2 -- .../rke-clusters/node-pools/digital-ocean/_index.md | 2 -- .../rke-clusters/node-pools/ec2/_index.md | 2 +- .../rke-clusters/node-pools/vsphere/_index.md | 4 +--- 6 files changed, 4 insertions(+), 10 deletions(-) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md index 
14437de9abd..049ce3d7e74 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md @@ -19,7 +19,7 @@ RKE launched clusters are separated into two categories: For use cases where you want to provision bare-metal servers, on-premise virtual machines, or bring virtual machines that are already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. ->**Note:** If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + >**Note:** If you want to reuse a node from a previous custom cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail.
diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md index 25340cb68d6..0248688e578 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md @@ -37,7 +37,7 @@ Begin creation of a custom cluster by provisioning a Linux host. Your host can b >**Notes:** > >- While creating your cluster, you must assign Kubernetes roles to your cluster nodes. If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). ->- If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. +>- If you want to reuse a node from a previous custom cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. Provision the host according to the requirements below. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md index aeaa0b7505a..dfece5159ba 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md @@ -8,8 +8,6 @@ aliases: Use {{< product >}} to create a Kubernetes cluster in Azure. 
->**Note:** If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - 1. From the **Clusters** page, click **Add Cluster**. 2. Choose **Azure**. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md index 6a16f6eb61d..479683c182c 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md @@ -7,8 +7,6 @@ aliases: --- Use {{< product >}} to create a Kubernetes cluster using DigitalOcean. ->**Note:** If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - 1. From the **Clusters** page, click **Add Cluster**. 2. Choose **DigitalOcean**. diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md index 2c7172c096c..899f4d7d3ac 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md @@ -14,7 +14,7 @@ Use {{< product >}} to create a Kubernetes cluster in Amazon EC2. 
- [Example IAM Policy](#example-iam-policy) - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - IAM Policy added as Permission to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. -- If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + ## Create the cluster diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md index d1ea9a08159..8eb4e5373cb 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md @@ -16,9 +16,7 @@ When creating a vSphere cluster, Rancher first provisions the specified amount o ## Prerequisites -- Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. If you are planning to make use of vSphere volumes for persistent storage in the cluster, there are [additional requirements]({{< baseurl >}}/rke/v0.1.x/en/config-options/cloud-providers/vsphere/) that must be met. - -- If you want to reuse a node from a previous Rancher Launched Kubernetes cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. 
If you reuse a node that hasn't been cleaned, cluster provisioning may fail. +Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. If you are planning to make use of vSphere volumes for persistent storage in the cluster, there are [additional requirements]({{< baseurl >}}/rke/v0.1.x/en/config-options/cloud-providers/vsphere/) that must be met. ## Provisioning a vSphere Cluster From 332512ff6ff1647a567cc2201139316834a82d1e Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Tue, 16 Oct 2018 12:58:10 +0200 Subject: [PATCH 14/30] Typos/misc improvements --- .../air-gap-installation/_index.md | 2 +- .../rancher/v2.x/en/installation/ha/_index.md | 12 +++++----- .../installation/ha/create-nodes-lb/_index.md | 12 ++++------ .../en/installation/ha/helm-init/_index.md | 4 ++-- .../installation/ha/kubernetes-rke/_index.md | 24 ++++++++++--------- .../en/installation/ha/rke-add-on/_index.md | 2 +- .../ha/rke-add-on/api-auditing/_index.md | 2 +- .../ha/rke-add-on/layer-4-lb/_index.md | 4 ++-- .../ha/rke-add-on/layer-4-lb/nlb/_index.md | 2 +- .../ha/rke-add-on/layer-7-lb/_index.md | 4 ++-- .../ha/rke-add-on/layer-7-lb/alb/_index.md | 2 +- .../ha/rke-add-on/layer-7-lb/nginx/_index.md | 2 +- .../ha/rke-add-on/proxy/_index.md | 2 +- .../404-default-backend/_index.md | 2 +- .../ha/rke-add-on/troubleshooting/_index.md | 2 +- .../generic-troubleshooting/_index.md | 2 +- .../job-complete-status/_index.md | 2 +- .../v2.x/en/upgrades/upgrades/_index.md | 4 ++-- .../upgrades/ha-server-upgrade/_index.md | 2 +- .../migrating-from-rke-add-on/_index.md | 2 +- 20 files changed, 45 insertions(+), 45 deletions(-) diff --git a/content/rancher/v2.x/en/installation/air-gap-installation/_index.md b/content/rancher/v2.x/en/installation/air-gap-installation/_index.md index 4ef7ca1cabd..b6a943ab19c 100644 --- a/content/rancher/v2.x/en/installation/air-gap-installation/_index.md +++ 
b/content/rancher/v2.x/en/installation/air-gap-installation/_index.md @@ -8,7 +8,7 @@ In environments where security is high priority, you can set up Rancher in an ai - Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machine. If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). - For each Rancher [release](https://github.com/rancher/rancher/releases), we provide the Docker images and scripts needed to mirror these images to your own registry. The Docker images are used when installing Rancher in a HA setup, when provisioning a cluster where Rancher is launching Kubernetes, or when you enable features like pipelines or logging. + For each Rancher [release](https://github.com/rancher/rancher/releases), we provide the Docker images and scripts needed to mirror these images to your own registry. The Docker images are used when installing Rancher in an HA setup, when provisioning a cluster where Rancher is launching Kubernetes, or when you enable features like pipelines or logging. - **Installation Option:** Before beginning your air gap installation, choose whether you want ~~a~~ [single-node install]({{< baseurl >}}/rancher/v2.x/en/installation/single-node) or a [high availability install]({{< baseurl >}}/rancher/v2.x/en/installation/ha). View your chosen configuration's introduction notes along with Rancher's [node requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). 
diff --git a/content/rancher/v2.x/en/installation/ha/_index.md b/content/rancher/v2.x/en/installation/ha/_index.md index 80a7fa6af48..24fe7a7b010 100644 --- a/content/rancher/v2.x/en/installation/ha/_index.md +++ b/content/rancher/v2.x/en/installation/ha/_index.md @@ -7,14 +7,14 @@ For production environments, we recommend installing Rancher in a high-availabil This procedure walks you through setting up a 3-node cluster with RKE and installing the Rancher chart with the Helm package manager. -> **Note:** For the best performance, we recommend this Kubernetes cluster be dedicated only to the Rancher workload. +> **Important:** For the best performance, we recommend this Kubernetes cluster to be dedicated only to run Rancher. ## Recommended Architecture * DNS for Rancher should resolve to a layer 4 load balancer -* The Load Balancer should forward ports 80 and 443 TCP to all 3 nodes in the Kubernetes cluster. -* The Ingress controller will redirect http port 80 to https and terminate SSL/TLS on port 443. -* The Ingress controller will forward traffic to port 80 on the pod in the Rancher deployment. +* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. HA Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers ![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) @@ -36,7 +36,7 @@ The following CLI tools are required for this install. 
Please make sure these to ## Additional Install Options -* [Migrating from RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) +* [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) ## Previous Methods @@ -44,6 +44,6 @@ The following CLI tools are required for this install. Please make sure these to > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. * [RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/) diff --git a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md b/content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md index 389b0e89f68..26bb172cf6a 100644 --- a/content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md +++ b/content/rancher/v2.x/en/installation/ha/create-nodes-lb/_index.md @@ -7,15 +7,15 @@ Use your provider of choice to provision 3 nodes and a Load Balancer endpoint fo > **Note:** These nodes must be in the same region/datacenter. You may place these servers in separate availability zones. 
-**Don't forget to collect the SSH credentials and DNS or IP addresses of your nodes to provide to RKE in the next step.** +### Node Requirements -### Host Requirements +View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). -View the requirements for nodes hosting Rancher at [Requirements]({{< baseurl >}}/rancher/v2.x/en/installation/requirements). +View the OS requirements for RKE at [RKE Requirements]({{< baseurl >}}/rke/v0.1.x/en/os/) ### Load Balancer -RKE will configure an ingress-controller pod, on each of your nodes. The ingress-controller pods are bound to ports 80 and 443 TCP on the host network and are the entry point for HTTPS traffic to the Rancher server. +RKE will configure an Ingress controller pod, on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. Configure a load balancer as a basic Layer 4 TCP forwarder. The exact configuration will vary depending on your environment. @@ -23,6 +23,4 @@ Configure a load balancer as a basic Layer 4 TCP forwarder. The exact configurat * [Amazon NLB]({{< baseurl >}}/rancher/v2.x/en/installation/ha/create-nodes-lb/nlb/) -
- -### [Next: Install Kubernetes with RKE]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/) \ No newline at end of file +### [Next: Install Kubernetes with RKE]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/) diff --git a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md b/content/rancher/v2.x/en/installation/ha/helm-init/_index.md index 1d58e21872b..5fc3a6fcf6a 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-init/_index.md @@ -3,7 +3,7 @@ title: 3 - Initialize Helm (Install tiller) weight: 195 --- -Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). +Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). > **Note:** For systems without direct internet access see [Helm - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#helm) for install details. @@ -12,7 +12,7 @@ Helm is the package management tool of choice for Kubernetes. Helm "charts" prov Helm installs the `tiller` service on your cluster to manage charts. Since RKE enables RBAC by default we will need to use `kubectl` to create a `serviceaccount` and `clusterrolebinding` so `tiller` has permission to deploy to the cluster. * Create the `ServiceAccount` in the `kube-system` namespace. 
-* Create the `ClusterRoleBinding` to give the `tiller` account access to the cluster. +* Create the `ClusterRoleBinding` to give the `tiller` service account access to the cluster. * Finally use `helm` to initialize the `tiller` service ```plain diff --git a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md b/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md index b364c8c75d9..b1b30c8b728 100644 --- a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md +++ b/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md @@ -9,7 +9,7 @@ Use RKE to install Kubernetes with a high availability etcd configuration. ### Create the `rancher-cluster.yml` File -Using the sample below create the `rancher-cluster.yml` file. Replace the IP Addresses in the `nodes` list with the IP address or DNS names of the 3 Nodes you created. +Using the sample below create the `rancher-cluster.yml` file. Replace the IP Addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. > **Note:** If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. 
@@ -36,21 +36,21 @@ services: retention: 24h ``` -#### Common RKE Nodes: Options +#### Common RKE Nodes Options -| Option | Description | -| --- | --- | -| `address` | (required) The public DNS or IP address | -| `internal_address` | (optional) The private DNS or IP address for internal cluster traffic | -| `role` | (required) List of Kubernetes roles assigned to the node | -| `ssh_key_path` | (optional) Path to SSH private key used to authenticate to the node | -| `user` | (required) A user that can run docker commands | +| Option | Required | Description | +| --- | --- | --- | +| `address` | yes | The public DNS or IP address | +| `user` | yes | A user that can run docker commands | +| `role` | yes | List of Kubernetes roles assigned to the node | +| `internal_address` | no | The private DNS or IP address for internal cluster traffic | +| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | #### Advanced Configurations RKE has many configuration options for customizing the install to suit your specific environment. -Please see the [RKE Documentation]({{< baseurl >}}/rke/v0.1.x/en/) for the full list of options and capabilities. +Please see the [RKE Documentation]({{< baseurl >}}/rke/v0.1.x/en/config-options/) for the full list of options and capabilities. ### Run RKE @@ -58,6 +58,8 @@ Please see the [RKE Documentation]({{< baseurl >}}/rke/v0.1.x/en/) for the full rke up --config ./rancher-cluster.yml ``` +When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. + ### Testing Your Cluster RKE should have created a file `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. @@ -70,7 +72,7 @@ You can copy this file to `$HOME/.kube/config` or if you are working with multip export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml ``` -Test your connectivity with `kubectl` and see if you can get the list of nodes back. 
+Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state. ``` kubectl get nodes diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md index 2e32347bafa..1c9845e9cdc 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/_index.md @@ -7,7 +7,7 @@ weight: 276 > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. * [High Availability Installation with External Load Balancer (TCP/Layer 4)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb) diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md index f19184b574e..c8f6933f895 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). 
> ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. You can know what happened, when it happened, who initiated it, and what cluster it affected. API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md index 2272f5a42fb..b192803568f 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/_index.md @@ -9,14 +9,14 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). 
The cluster's sole purpose is running pods for Rancher. The setup is based on: - Layer 4 load balancer (TCP) - [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) -In a HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. +In an HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. HA Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers ![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha.svg) diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md index f2e8368c779..9ba02bc211a 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-4-lb/nlb/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. ## Objectives diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md index 132000ae609..75f8b9ea423 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/_index.md @@ -9,14 +9,14 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - Layer 7 Loadbalancer with SSL termination (HTTPS) - [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) -In a HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. 
+In an HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect them to cluster nodes using logic that optimally distributes load.
 
 HA Rancher install with layer 7 load balancer, depicting SSL termination at load balancer
 ![Rancher HA]({{< baseurl >}}/img/rancher/ha/rancher2ha-l7.svg)
diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md
index 8d5e893f1d7..229cfa632d0 100644
--- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md
+++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/alb/_index.md
@@ -9,7 +9,7 @@ aliases:
 >
 >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline).
 >
->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart.
+>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart.
## Objectives diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md index 515c867c974..0a99effcaba 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/layer-7-lb/nginx/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. ## Install NGINX diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md index 57b2381e1ce..b35ac7a9e09 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/proxy/_index.md @@ -7,7 +7,7 @@ weight: 277 > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md index 5bcd69fc4fb..086744c0d50 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/404-default-backend/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. 
diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md index dee15c33749..5aeeafc2224 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This section contains common errors seen when setting up a High Availability Installation. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md index a529d1dfa44..543664669d9 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/generic-troubleshooting/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). 
> ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. Below are steps that you can follow to determine what is wrong in your cluster. diff --git a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md index fc6309be3d9..ca20cded639 100644 --- a/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md +++ b/content/rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/job-complete-status/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. 
diff --git a/content/rancher/v2.x/en/upgrades/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/_index.md index dc876a708c9..6c3db9da3d9 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/_index.md @@ -13,7 +13,7 @@ This section contains information about how to upgrade your Rancher server to a - [Upgrade an HA Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/) - [Upgrade a Air Gap HA Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/) -- [Migrating from a RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) +- [Migrating from an RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) ### Upgrading an RKE Add-on Install @@ -21,6 +21,6 @@ This section contains information about how to upgrade your Rancher server to a > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
- [Upgrading a High Availability Install - RKE Add-On Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/) diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/_index.md index b494c9a0ab5..c7686e44015 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/_index.md @@ -9,7 +9,7 @@ aliases: > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This document is for upgrading Rancher HA installed with the RKE Add-On yaml. See these docs to migrate to or upgrade Rancher installed with the Helm chart. 
diff --git a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md index b3562e1a750..5bf1119c27a 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md @@ -1,5 +1,5 @@ --- -title: Migrating from a HA RKE Add-on Install +title: Migrating from an HA RKE Add-on Install weight: 1030 --- From e1c94619f4276a7ebf059deba4c6ec85a8d69124 Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Tue, 16 Oct 2018 17:33:45 -0700 Subject: [PATCH 15/30] updating ha install/upgrade command --- .../air-gap-installation/install-rancher/_index.md | 4 ++-- .../v2.x/en/installation/ha/helm-rancher/_index.md | 13 ++++++++----- .../ha-server-upgrade-helm-airgap/_index.md | 6 +++--- .../upgrades/ha-server-upgrade-helm/_index.md | 8 +++++--- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md index 7b7490b3938..9689a7d9163 100644 --- a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md @@ -104,10 +104,10 @@ Install the Rancher chart repo. helm repo add rancher-stable https://releases.rancher.com/server-charts/stable ``` -Fetch the latest `rancher-stable/rancher` chart. This will pull down the chart and save it in the current directory as a `.tgz` file. +Fetch the latest `rancher-latest/rancher` chart. This will pull down the chart and save it in the current directory as a `.tgz` file. ```plain -helm fetch rancher-stable/rancher +helm fetch rancher-latest/rancher ``` Render the template with the options you would use to install the chart. 
See [Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/) for details on the various options. Remember to set the `rancherImage` option to pull the image from your private registry. This will create a `rancher` directory with the Kubernetes manifest files. diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md index ab7f4654d31..3d9af17c245 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md @@ -11,10 +11,13 @@ Rancher installation is now managed using the Helm package manager for Kubernete Use `helm repo add` to add the Rancher chart repository. +>**Note:** We recommend adding the `latest` Rancher chart repository used in the command below, but you also have the option of using our `stable` repository. For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/). + ``` -helm repo add rancher-stable https://releases.rancher.com/server-charts/stable +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest ``` + ## Chart Versioning Notes Up until the initial helm chart release for v2.1.0, the helm chart version matched the Rancher version (i.e `appVersion`). @@ -25,7 +28,7 @@ Run `helm search rancher` to view which Rancher version will be launched for the ``` NAME CHART VERSION APP VERSION DESCRIPTION -rancher-stable/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... +rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... 
``` ### Install cert-manager @@ -63,7 +66,7 @@ The only requirement is to set the `hostname` to the DNS name you pointed at you >**Using Air Gap?** [Set the `rancherImage` option]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#install-rancher-using-private-registry) in your command, pointing toward your private registry. ``` -helm install rancher-stable/rancher \ +helm install rancher-latest/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org @@ -78,7 +81,7 @@ Set `hostname`, `ingress.tls.source=letsEncrypt` and LetsEncrypt options. >**Using Air Gap?** [Set the `rancherImage` option]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#install-rancher-using-private-registry) in your command, pointing toward your private registry. ``` -helm install rancher-stable/rancher \ +helm install rancher-latest/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ @@ -97,7 +100,7 @@ Set `hostname` and `ingress.tls.source=secret`. > **Note:** If you are using a Private CA signed cert, add `--set privateCA=true` ``` -helm install rancher-stable/rancher \ +helm install rancher-latest/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md index 5095a92343c..55021f59cb3 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md @@ -34,7 +34,7 @@ Run `helm search rancher` to view which Rancher version will be launched for the ``` NAME CHART VERSION APP VERSION DESCRIPTION -rancher-stable/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... 
+rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... ``` ## Upgrade Rancher @@ -45,12 +45,12 @@ rancher-stable/rancher 2018.10.1 v2.1.0 Install Rancher Serve helm repo update ``` -1. Fetch the latest `rancher-stable/rancher` chart. +1. Fetch the latest `rancher-latest/rancher` chart. This will pull down the chart and save it in the current directory as a `.tgz` file. ```plain - helm fetch rancher-stable/rancher + helm fetch rancher-latest/rancher ``` 1. Render the upgrade template. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md index a9fcaf3eb97..2a091738e35 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md @@ -48,7 +48,7 @@ Run `helm search rancher` to view which Rancher version will be launched for the ``` NAME CHART VERSION APP VERSION DESCRIPTION -rancher-stable/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... +rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... ``` ## Upgrade Rancher @@ -74,10 +74,12 @@ rancher-stable/rancher 2018.10.1 v2.1.0 Install Rancher Serve 3. Take all values from the previous command and use `helm` with `--set` options to upgrade Rancher to the latest version. ``` - helm upgrade rancher rancher-stable/rancher --set hostname=rancher.my.org + helm upgrade rancher rancher-latest/rancher --set hostname=rancher.my.org ``` - > **Important:** For any values listed from Step 2, you must use `--set key=value` to apply the same values to the helm chart. + > **Important:** + >- For any values listed from Step 2, you must use `--set key=value` to apply the same values to the helm chart. 
+ >- During upgrade, you must use the Rancher chart repository that you used during initial install (the `latest` repository by default). For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/) ## Rolling Back From c7bf9352509322f5a91f08264ace70524ae21cef Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Tue, 16 Oct 2018 19:07:27 -0700 Subject: [PATCH 16/30] updating server tags topic for ha --- .../restorations/ha-restoration/_index.md | 24 +++------- .../en/installation/server-tags/_index.md | 46 +++++++++++++++++-- .../ha-server-upgrade-helm-airgap/_index.md | 6 ++- .../upgrades/ha-server-upgrade-helm/_index.md | 5 +- 4 files changed, 58 insertions(+), 23 deletions(-) diff --git a/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md b/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md index 221d4d74ff9..0ae9fc9ad89 100644 --- a/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md +++ b/content/rancher/v2.x/en/backups/restorations/ha-restoration/_index.md @@ -9,26 +9,16 @@ This procedure describes how to use RKE to restore a snapshot of the Rancher Kub ## Restore Outline -1. [Preparation](#1-preparation) + - Install utilities and create new or clean existing nodes to prepare for restore. -2. [Place Snapshot and PKI Bundle](#2-place-snapshot-and-pki-bundle) - - Pick a node and place snapshot `.db` and `pki.bundle.tar.gz` files. - -3. [Configure RKE](#3-configure-rke) - - Configure RKE `cluster.yml`. Remove `addons:` section and point configuration to the clean nodes. - -4. [Restore Database](#4-restore-database) - - Run RKE command to restore the `etcd` database to a single node. - -5. [Bring Up the Cluster](#5-bring-up-the-cluster) - - Run RKE commands to bring up cluster one a single node. Clean up old nodes. Verify and add additional nodes. +- [1. Preparation](#1-preparation) +- [2. Place Snapshot and PKI Bundle](#2-place-snapshot-and-pki-bundle) +- [3. 
Configure RKE](#3-configure-rke) +- [4. Restore Database](#4-restore-database) +- [5. Bring Up the Cluster](#5-bring-up-the-cluster) +
### 1. Preparation diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md index 2dde9a9e39b..19394fb0593 100644 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ b/content/rancher/v2.x/en/installation/server-tags/_index.md @@ -1,13 +1,51 @@ --- -title: Server Tags +title: Choosing a Version of Rancher weight: 230 --- -{{< product >}} Server is distributed as a Docker image, which have _tags_ attached to them. Tags are used to identify what version is included in the image. Rancher includes additional tags that point to a specific version. Remember that if you use the additional tags, you must explicitly pull a new version of that image tag. Otherwise it will use the cached image on the host. +## Single Node Installs + +When performing single-node installs, upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. + +### Server Tags + +Rancher Server is distributed as a Docker image, which have tags attached to them. Remember that if you use the additional tags, you must explicitly pull a new version of that image tag. Otherwise it will use the cached image on the host. You can find Rancher images at [DockerHub](https://hub.docker.com/r/rancher/rancher/tags/). -- `rancher/rancher:latest`: Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. -- `rancher/rancher:stable`: Our newest stable release. This tag is recommended for production. +| Tag | Description | +| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. 
| +| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | +| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. | +
The `master` tag or any tag with a `-rc` or another suffix is meant for the {{< product >}} testing team to validate. You should not use these tags, as these builds are not officially supported. + +## High Availability Installs + +When installing, upgrading, or rolling back Rancher Server in a high availability configuration, you can choose what version of Rancher you want to use. + +The images available for installation are controlled by two factors: + +- The Rancher Helm repository that you configured (or will configure) during initial installation of Rancher Server. +- The `rancherImageTag` option, which you can set from the command line. + +### Rancher Chart Repositories + +In high availability Rancher configurations, Rancher Server is distributed by Helm chart. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must configure a chart repository that contains Docker images for Rancher. You can install Rancher from two different repos: + +Repository | Repo Configuration Command | Description +-----------|-----|------------- +`latest` | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo. +`stable` | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. + +>**Important!** +> +>When _upgrading_ or _rolling back_ Rancher in a high availability configuration, you must use the same repository that you used during installation. + +### Chart Option: `rancherImageTag` + +When installing Rancher in a high availability configuration, the latest version is used by default. However, if you want to install a specific version of Rancher, you can set the `rancherImageTag` [option]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#advanced-options). 
This option sets the version of Rancher that's deployed when you install by Helm chart. + +>**Note:** The versions of Rancher available in each [chart repository](#rancher-chart-repositories) are different. \ No newline at end of file diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md index 55021f59cb3..2b5187324ce 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md @@ -45,14 +45,18 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve helm repo update ``` -1. Fetch the latest `rancher-latest/rancher` chart. +1. Fetch the latest Rancher Server chart from the helm repository that you used during installation. This will pull down the chart and save it in the current directory as a `.tgz` file. + >**Note:** This command assumes you have the `latest` Rancher helm repo configured. If you initially installed Rancher using the `stable` helm repo, replace `rancher-latest` with `rancher-stable`. For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/). + ```plain helm fetch rancher-latest/rancher ``` + + 1. Render the upgrade template. Use the same `--set` values you used for the install. Remember to set the `--is-upgrade` flag for `helm`. This will create a `rancher` directory with the Kubernetes manifest files. 
diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md index 2a091738e35..0f1f3696715 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md @@ -46,6 +46,8 @@ Since there are times where the helm chart will require changes without any chan Run `helm search rancher` to view which Rancher version will be launched for the specific helm chart version. + + ``` NAME CHART VERSION APP VERSION DESCRIPTION rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Server to manage Kubernetes clusters acro... @@ -78,8 +80,9 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve ``` > **Important:** + > >- For any values listed from Step 2, you must use `--set key=value` to apply the same values to the helm chart. - >- During upgrade, you must use the Rancher chart repository that you used during initial install (the `latest` repository by default). For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/) + >- During upgrade, you must use the Rancher chart repository that you used during initial install (the `latest` repository by default). For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/). 
## Rolling Back From 4dad33683b1353b9462da8997f0a00002d615d65 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Wed, 17 Oct 2018 15:27:55 +0200 Subject: [PATCH 17/30] Add FAQ on adding args/binds/envvars to k8s components --- content/rancher/v2.x/en/faq/technical/_index.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/content/rancher/v2.x/en/faq/technical/_index.md b/content/rancher/v2.x/en/faq/technical/_index.md index 86103435ecb..2ead99f4b86 100644 --- a/content/rancher/v2.x/en/faq/technical/_index.md +++ b/content/rancher/v2.x/en/faq/technical/_index.md @@ -119,3 +119,7 @@ A node is required to have a static IP configured (or a reserved IP via DHCP). I When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{< baseurl >}}/rancher/v2.x/en/faq/cleaning-cluster-nodes/) to clean the node. When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. + +### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? + +You can add additional arguments/binds/environment variables via the [Config File]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{< baseurl >}}/rke/v0.1.x/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{< baseurl >}}/rke/v0.1.x/en/example-yamls/). 
From 68c0d238a7f60fcb5052f8e9b505ac5dbddfa607 Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Tue, 16 Oct 2018 19:07:27 -0700 Subject: [PATCH 18/30] updating server tags topic for ha --- .../v2.x/en/installation/server-tags/_index.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md index 19394fb0593..8e06f99cb01 100644 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ b/content/rancher/v2.x/en/installation/server-tags/_index.md @@ -5,7 +5,7 @@ weight: 230 ## Single Node Installs -When performing single-node installs, upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. +When performing single-node installs, upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. ### Server Tags @@ -17,18 +17,18 @@ You can find Rancher images at [DockerHub](https://hub.docker.com/r/rancher/ranc | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | | `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | -| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. | +| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. |
-The `master` tag or any tag with a `-rc` or another suffix is meant for the {{< product >}} testing team to validate. You should not use these tags, as these builds are not officially supported. +>**Note:** The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. ## High Availability Installs -When installing, upgrading, or rolling back Rancher Server in a high availability configuration, you can choose what version of Rancher you want to use. +When installing, upgrading, or rolling back Rancher Server in a [high availability configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha), you can choose what version of Rancher you want to use. The images available for installation are controlled by two factors: -- The Rancher Helm repository that you configured (or will configure) during initial installation of Rancher Server. +- The repository of Helm charts that you will configur (or have configured) during initial installation of Rancher Server. - The `rancherImageTag` option, which you can set from the command line. ### Rancher Chart Repositories @@ -37,8 +37,8 @@ In high availability Rancher configurations, Rancher Server is distributed by He Repository | Repo Configuration Command | Description -----------|-----|------------- -`latest` | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo. -`stable` | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. +`latest` | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. 
+`stable` | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. >**Important!** > From 4b43589319482486063cd62b660293716e11ddc2 Mon Sep 17 00:00:00 2001 From: MBishop17 Date: Wed, 17 Oct 2018 14:51:09 -0700 Subject: [PATCH 19/30] updating ha install/upgrade cmds for new latest repo --- .../install-rancher/_index.md | 14 +++++------ .../en/installation/ha/helm-rancher/_index.md | 25 +++++++++++-------- .../en/installation/server-tags/_index.md | 11 ++++---- .../rollbacks/ha-server-rollbacks/_index.md | 2 +- .../ha-server-upgrade-helm-airgap/_index.md | 6 ++--- .../upgrades/ha-server-upgrade-helm/_index.md | 6 +++-- 6 files changed, 35 insertions(+), 29 deletions(-) diff --git a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md index 9689a7d9163..81283ffc762 100644 --- a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md @@ -80,12 +80,12 @@ Fetch and render the `helm` charts on a system that has internet access. #### Cert-Manager -If you are installing Rancher with Rancher Self-Signed certificates you will need to install 'cert-manager' on your cluster. If you are installing your own certificates you may skip this section. +If you are installing Rancher with Rancher self-signed certificates you will need to install 'cert-manager' on your cluster. If you are installing your own certificates you may skip this section. -Fetch the latest `stable/cert-manager` chart. This will pull down the chart and save it in the current directory as a `.tgz` file. 
+Fetch the latest `cert-manager` chart from your configured repo, replacing `` with the repo you're using (`latest` or `stable`). For more information on chart repo configuration, see [Choosing a Version of Rancher: Rancher Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories). ```plain -helm fetch stable/cert-manager +helm fetch /cert-manager ``` Render the template with the option you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. @@ -98,16 +98,16 @@ helm template ./cert-manager-.tgz --output-dir . \ #### Rancher -Install the Rancher chart repo. +Install the Rancher chart repo. Replace `` with the repository that you're using ('latest' or 'stable'). ```plain -helm repo add rancher-stable https://releases.rancher.com/server-charts/stable +helm repo add rancher- https://releases.rancher.com/server-charts/ ``` -Fetch the latest `rancher-latest/rancher` chart. This will pull down the chart and save it in the current directory as a `.tgz` file. +Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. Replace `` with the repo you're using (`latest` or `stable`). ```plain -helm fetch rancher-latest/rancher +helm fetch rancher-/rancher ``` Render the template with the options you would use to install the chart. See [Install Rancher]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/) for details on the various options. Remember to set the `rancherImage` option to pull the image from your private registry. This will create a `rancher` directory with the Kubernetes manifest files. 
diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md index 3d9af17c245..b9a982a5aad 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md @@ -9,12 +9,14 @@ Rancher installation is now managed using the Helm package manager for Kubernete ### Add the Chart Repo -Use `helm repo add` to add the Rancher chart repository. +Use `helm repo add` command to add the Rancher chart repository. ->**Note:** We recommend adding the `latest` Rancher chart repository used in the command below, but you also have the option of using our `stable` repository. For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/). +Replace `` with the chart repository that you want to use (either `latest` or `stable`). + +>**Note:** For more information about each repository and which is best for your use case, see [Choosing a Version of Rancher: Rancher Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories). ``` -helm repo add rancher-latest https://releases.rancher.com/server-charts/latest +helm repo add rancher- https://releases.rancher.com/server-charts/ ``` @@ -37,10 +39,11 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve Rancher relies on [cert-manager](https://github.com/kubernetes/charts/tree/master/stable/cert-manager) from the Kubernetes Helm stable catalog to issue self-signed or LetsEncrypt certificates. -Install `cert-manager` from the Helm stable catalog. +Install `cert-manager` from your Helm catalog. Replace `` with the repository that you configured in [Add the Chart Repo](#add-the-chart-repo) (`latest` or `stable`). 
+ ``` -helm install stable/cert-manager \ +helm install /cert-manager \ --name cert-manager \ --namespace kube-system ``` @@ -61,12 +64,12 @@ There are three options for the source of the certificate. The default is for Rancher to generate a CA and use the `cert-manager` to issue the certificate for access to the Rancher server interface. -The only requirement is to set the `hostname` to the DNS name you pointed at your load balancer. +The only requirement is to set the `hostname` to the DNS name you pointed at your load balancer. Replace `` with the repository that you configured in [Add the Chart Repo](#add-the-chart-repo) (`latest` or `stable`). >**Using Air Gap?** [Set the `rancherImage` option]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#install-rancher-using-private-registry) in your command, pointing toward your private registry. ``` -helm install rancher-latest/rancher \ +helm install rancher-/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org @@ -76,12 +79,12 @@ helm install rancher-latest/rancher \ Use [LetsEncrypt](https://letsencrypt.org/)'s free service to issue trusted SSL certs. This configuration uses http validation so the Load Balancer must have a Public DNS record and be accessible from the internet. -Set `hostname`, `ingress.tls.source=letsEncrypt` and LetsEncrypt options. +Set `hostname`, `ingress.tls.source=letsEncrypt` and LetsEncrypt options. Replace `` with the repository that you configured in [Add the Chart Repo](#add-the-chart-repo) (`latest` or `stable`). >**Using Air Gap?** [Set the `rancherImage` option]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#install-rancher-using-private-registry) in your command, pointing toward your private registry. 
``` -helm install rancher-latest/rancher \ +helm install rancher-/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ @@ -95,12 +98,12 @@ Create Kubernetes secrets from your own certificates for Rancher to use. > **Note:** The common name for the cert will need to match the `hostname` option or the ingress controller will fail to provision the site for Rancher. -Set `hostname` and `ingress.tls.source=secret`. +Set `hostname` and `ingress.tls.source=secret`. Replace `` with the repository that you configured in [Add the Chart Repo](#add-the-chart-repo) (`latest` or `stable`). > **Note:** If you are using a Private CA signed cert, add `--set privateCA=true` ``` -helm install rancher-latest/rancher \ +helm install rancher-/rancher \ --name rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md index 8e06f99cb01..c46f16e76c6 100644 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ b/content/rancher/v2.x/en/installation/server-tags/_index.md @@ -5,13 +5,11 @@ weight: 230 ## Single Node Installs -When performing single-node installs, upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. +When performing [single-node installs]({{< baseurl >}}/rancher/v2.x/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. ### Server Tags -Rancher Server is distributed as a Docker image, which have tags attached to them. Remember that if you use the additional tags, you must explicitly pull a new version of that image tag. Otherwise it will use the cached image on the host. -You can find Rancher images at [DockerHub](https://hub.docker.com/r/rancher/rancher/tags/). - +Rancher Server is distributed as a Docker image, which have tags attached to them. 
You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise any image cached on the host will be used. | Tag | Description | | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -20,6 +18,7 @@ You can find Rancher images at [DockerHub](https://hub.docker.com/r/rancher/ranc | `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. |
+ >**Note:** The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. ## High Availability Installs @@ -28,7 +27,7 @@ When installing, upgrading, or rolling back Rancher Server in a [high availabili The images available for installation are controlled by two factors: -- The repository of Helm charts that you will configur (or have configured) during initial installation of Rancher Server. +- The repository of Helm charts that you will configure (or have configured) during initial installation of Rancher Server. - The `rancherImageTag` option, which you can set from the command line. ### Rancher Chart Repositories @@ -39,6 +38,8 @@ Repository | Repo Configuration Command | Description -----------|-----|------------- `latest` | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. `stable` | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. +
+Instructions on when to make these configurations are available in [High Availability Install]({{< baseurl >}}/rancher/v2.x/en/installation/ha). >**Important!** > diff --git a/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md b/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md index 1a8cd596d01..04e1b27935d 100644 --- a/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md +++ b/content/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/_index.md @@ -12,4 +12,4 @@ To restore Rancher follow the procedure detailed here: [Restoring Backups — Hi Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. -> **Note:** Managed cluster are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. +>**Note:** Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md index 2b5187324ce..708862ae83c 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/_index.md @@ -47,12 +47,12 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve 1. Fetch the latest Rancher Server chart from the helm repository that you used during installation. - This will pull down the chart and save it in the current directory as a `.tgz` file. + This command will pull down the chart and save it in the current directory as a `.tgz` file. 
Replace `` with the name of the repository that you used during installation (either `stable` or `latest`). - >**Note:** This command assumes you have the `latest` Rancher helm repo configured. If you initially installed Rancher using the `stable` helm repo, replace `rancher-latest` with `rancher-stable`. For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/). + >**Note:** During upgrades, you must fetch from the chart repo that you configured during initial installation (either the `stable` or `latest` repository). For more information, see [Choosing a Version of Rancher: Rancher Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories). ```plain - helm fetch rancher-latest/rancher + helm fetch rancher-/rancher ``` diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md index 0f1f3696715..35ce584dbd5 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md @@ -75,14 +75,16 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve 3. Take all values from the previous command and use `helm` with `--set` options to upgrade Rancher to the latest version. + Replace `` with the name of the repository that you used during installation (either `stable` or `latest`). + ``` - helm upgrade rancher rancher-latest/rancher --set hostname=rancher.my.org + helm upgrade rancher rancher-/rancher --set hostname=rancher.my.org ``` > **Important:** > >- For any values listed from Step 2, you must use `--set key=value` to apply the same values to the helm chart. - >- During upgrade, you must use the Rancher chart repository that you used during initial install (the `latest` repository by default). 
For more information, see [Server Tags]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/). + >- During upgrade, you must use the Rancher chart repository that you used during initial install (either the `stable` or `latest` repository). For more information, see [Choosing a Version of Rancher: Rancher Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories). ## Rolling Back From 4911d6066d68c038d7ac5b17e3a1c419652ffd57 Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Wed, 17 Oct 2018 16:51:01 -0700 Subject: [PATCH 20/30] fixing fetch instructions for cert manager --- .../air-gap-installation/install-rancher/_index.md | 4 ++-- .../rancher/v2.x/en/installation/ha/helm-rancher/_index.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md index 81283ffc762..0808d5a3988 100644 --- a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md @@ -82,10 +82,10 @@ Fetch and render the `helm` charts on a system that has internet access. If you are installing Rancher with Rancher self-signed certificates you will need to install 'cert-manager' on your cluster. If you are installing your own certificates you may skip this section. -Fetch the latest `cert-manager` chart from your configured repo, replacing `` with the repo you're using (`latest` or `stable`). For more information on chart repo configuration, see [Choosing a Version of Rancher: Rancher Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories). +Fetch the latest `cert-manager` chart from your Helm catalog. 
```plain -helm fetch /cert-manager +helm fetch stable/cert-manager ``` Render the template with the option you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md index b9a982a5aad..9a2b7830a7d 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md @@ -39,11 +39,11 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve Rancher relies on [cert-manager](https://github.com/kubernetes/charts/tree/master/stable/cert-manager) from the Kubernetes Helm stable catalog to issue self-signed or LetsEncrypt certificates. -Install `cert-manager` from your Helm catalog. Replace `` with the repository that you configured in [Add the Chart Repo](#add-the-chart-repo) (`latest` or `stable`). +Install `cert-manager` from your Helm catalog. 
``` -helm install /cert-manager \ +helm install stable/cert-manager \ --name cert-manager \ --namespace kube-system ``` From d4e7b048894a5ec3fa150ebfeb3e3848a45a68a0 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Thu, 18 Oct 2018 15:15:15 +0200 Subject: [PATCH 21/30] Escape commas in noProxy for Helm install --- .../en/installation/ha/helm-rancher/chart-options/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md index c7a12773f3f..d9c3eb50cb1 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md @@ -29,7 +29,7 @@ weight: 276 | `debug` | false | `bool` - set debug flag on rancher server | | `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | | `proxy` | "" | `string` - string - HTTP[S] proxy server for Rancher | -| `noProxy` | "localhost,127.0.0.1" | `string` - comma separated list of hostnames or ip address not to use the proxy | +| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" | `string` - comma separated list of hostnames or ip address not to use the proxy | | `resources` | {} | `map` - rancher pod resource requests & limits | | `rancherImage` | "rancher/rancher" | `string` - rancher image source | | `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | @@ -59,7 +59,7 @@ Add your IP exceptions to the `noProxy` list. 
Make sure you add the Service clus ```plain --set proxy="http://:@:/" ---set noProxy="127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" +--set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16" ``` ### Additional Trusted CAs From 1b668b2f2782a21a56778b6c99e2095e9b6b7800 Mon Sep 17 00:00:00 2001 From: niusmallnan Date: Thu, 18 Oct 2018 20:38:43 +0800 Subject: [PATCH 22/30] Update ecs images and os security for v1.4.2 --- content/os/v1.x/en/about/security/_index.md | 1 + .../v1.x/en/installation/amazon-ecs/_index.md | 32 +++++++++---------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/content/os/v1.x/en/about/security/_index.md b/content/os/v1.x/en/about/security/_index.md index a5fa3da3ae8..5ff2e307ffe 100644 --- a/content/os/v1.x/en/about/security/_index.md +++ b/content/os/v1.x/en/about/security/_index.md @@ -35,3 +35,4 @@ weight: 303 | [CVE-2018-8897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-8897) | A statement in the System Programming Guide of the Intel 64 and IA-32 Architectures Software Developer's Manual (SDM) was mishandled in the development of some or all operating-system kernels, resulting in unexpected behavior for #DB exceptions that are deferred by MOV SS or POP SS, as demonstrated by (for example) privilege escalation in Windows, macOS, some Xen configurations, or FreeBSD, or a Linux kernel crash. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | | [L1 Terminal Fault](https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html) | L1 Terminal Fault is a hardware vulnerability which allows unprivileged speculative access to data which is available in the Level 1 Data Cache when the page table entry controlling the virtual address, which is used for the access, has the Present bit cleared or other reserved bits set. 
| 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | | [CVE-2018-3639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639) | Systems with microprocessors utilizing speculative execution and speculative execution of memory reads before the addresses of all prior memory writes are known may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis, aka Speculative Store Bypass (SSB), Variant 4. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | +| [CVE-2018-17182](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17182) | The vmacache_flush_all function in mm/vmacache.c mishandles sequence number overflows. An attacker can trigger a use-after-free (and possibly gain privileges) via certain thread creation, map, unmap, invalidation, and dereference operations. | 18 Oct 2018 | [RancherOS v1.4.2](https://github.com/rancher/os/releases/tag/v1.4.2) using Linux v4.14.73 | diff --git a/content/os/v1.x/en/installation/amazon-ecs/_index.md b/content/os/v1.x/en/installation/amazon-ecs/_index.md index 9c61c81c10e..18f100eea18 100644 --- a/content/os/v1.x/en/installation/amazon-ecs/_index.md +++ b/content/os/v1.x/en/installation/amazon-ecs/_index.md @@ -58,22 +58,22 @@ rancher: ### Amazon ECS enabled AMIs -Latest Release: [v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) +Latest Release: [v1.4.2](https://github.com/rancher/os/releases/tag/v1.4.2) Region | Type | AMI ---|--- | --- -ap-south-1 | HVM - ECS enabled | [ami-0c095bd65873104ea](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-0c095bd65873104ea) -eu-west-3 | HVM - ECS enabled | [ami-0a9420a7b9a46517b](https://eu-west-3.console.aws.amazon.com/ec2/home?region=eu-west-3#launchInstanceWizard:ami=ami-0a9420a7b9a46517b) -eu-west-2 | HVM - ECS enabled | 
[ami-09f7882ec876661f9](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-09f7882ec876661f9) -eu-west-1 | HVM - ECS enabled | [ami-0dd35c5333b908688](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-0dd35c5333b908688) -ap-northeast-2 | HVM - ECS enabled | [ami-0272129f9db7717d1](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-0272129f9db7717d1) -ap-northeast-1 | HVM - ECS enabled | [ami-0cc3f7df2e7cac07a](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-0cc3f7df2e7cac07a) -sa-east-1 | HVM - ECS enabled | [ami-0b8bc2a235e2ba0b8](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-0b8bc2a235e2ba0b8) -ca-central-1 | HVM - ECS enabled | [ami-0834633a15bc44f0c](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-0834633a15bc44f0c) -ap-southeast-1 | HVM - ECS enabled | [ami-076072ffb77b9e9c7](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-076072ffb77b9e9c7) -ap-southeast-2 | HVM - ECS enabled | [ami-0b39a6595e83e016d](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-0b39a6595e83e016d) -eu-central-1 | HVM - ECS enabled | [ami-0a8b8e376349bd511](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-0a8b8e376349bd511) -us-east-1 | HVM - ECS enabled | [ami-0683608046ab95a13](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-0683608046ab95a13) -us-east-2 | HVM - ECS enabled | [ami-0d6a98791e2f98a13](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-0d6a98791e2f98a13) -us-west-1 | HVM - ECS enabled | 
[ami-0880d73d3ea92c89c](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-0880d73d3ea92c89c) -us-west-2 | HVM - ECS enabled | [ami-0626403624bc30288](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-0626403624bc30288) +ap-south-1 | HVM - ECS enabled | [ami-0721722dd0f0a6b54](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-0721722dd0f0a6b54) +eu-west-3 | HVM - ECS enabled | [ami-017eb997502d38415](https://eu-west-3.console.aws.amazon.com/ec2/home?region=eu-west-3#launchInstanceWizard:ami=ami-017eb997502d38415) +eu-west-2 | HVM - ECS enabled | [ami-08772e5a96934e3e5](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-08772e5a96934e3e5) +eu-west-1 | HVM - ECS enabled | [ami-089bd570fab84ab89](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-089bd570fab84ab89) +ap-northeast-2 | HVM - ECS enabled | [ami-0420afe0617d4f723](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-0420afe0617d4f723) +ap-northeast-1 | HVM - ECS enabled | [ami-05bee9d87b6af1f5c](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-05bee9d87b6af1f5c) +sa-east-1 | HVM - ECS enabled | [ami-0bc2d9e3a0c98158c](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-0bc2d9e3a0c98158c) +ca-central-1 | HVM - ECS enabled | [ami-0c09398512d4ba6b9](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-0c09398512d4ba6b9) +ap-southeast-1 | HVM - ECS enabled | [ami-0ffa715a6bb9373de](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-0ffa715a6bb9373de) +ap-southeast-2 | HVM - ECS enabled | 
[ami-03cb7478f257c6490](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-03cb7478f257c6490) +eu-central-1 | HVM - ECS enabled | [ami-029b85c9d234c4f43](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-029b85c9d234c4f43) +us-east-1 | HVM - ECS enabled | [ami-0f274b6c9410c73ed](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-0f274b6c9410c73ed) +us-east-2 | HVM - ECS enabled | [ami-0cae94276614142ef](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-0cae94276614142ef) +us-west-1 | HVM - ECS enabled | [ami-03f86e5bb88269702](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-03f86e5bb88269702) +us-west-2 | HVM - ECS enabled | [ami-01bde5d57c4d043ad](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-01bde5d57c4d043ad) From ace7413dcf5b63d77792e898274dfff1bb1b7f37 Mon Sep 17 00:00:00 2001 From: Jeff Szielenski Date: Thu, 18 Oct 2018 08:01:34 -0700 Subject: [PATCH 23/30] Update _index.md Fixed grammatical error. 
--- .../rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md index 049ce3d7e74..377013f3bf0 100644 --- a/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md +++ b/content/rancher/v2.x/en/cluster-provisioning/rke-clusters/_index.md @@ -17,7 +17,7 @@ RKE launched clusters are separated into two categories: - [Custom Nodes]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/): - For use cases where you want to provision bare-metal servers, on-premise virtual machines, or bring virtual machines that are already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. + For use cases where you want to provision bare-metal servers, on-premise virtual machines, or bring virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. >**Note:** If you want to reuse a node from a previous custom cluster, [clean the node]({{< baseurl >}}/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. From 372430d060256cc7b5d38581ac7b3d63e85adf6b Mon Sep 17 00:00:00 2001 From: Jeff Szielenski Date: Thu, 18 Oct 2018 10:39:23 -0700 Subject: [PATCH 24/30] Update _index.md Fixed grammatical error. 
--- content/rancher/v2.x/en/faq/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/rancher/v2.x/en/faq/_index.md b/content/rancher/v2.x/en/faq/_index.md index 534424ce53f..63c03de071a 100644 --- a/content/rancher/v2.x/en/faq/_index.md +++ b/content/rancher/v2.x/en/faq/_index.md @@ -13,7 +13,7 @@ See [Technical FAQ]({{< baseurl >}}/rancher/v2.x/en/faq/technical/), for frequen #### What does it mean when you say Rancher v2.0 is built on Kubernetes? -Rancher v2.0 is a complete container management platform built on 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. +Rancher v2.0 is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. #### Do you plan to implement upstream Kubernetes, or continue to work on your own fork? 
From 5a5ed49bc0104d067917ddc81c0ddc73fda5f51a Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Thu, 18 Oct 2018 11:49:50 -0700 Subject: [PATCH 25/30] updates per Denise --- .../air-gap-installation/install-rancher/_index.md | 4 ++-- .../rancher/v2.x/en/installation/ha/helm-rancher/_index.md | 2 +- content/rancher/v2.x/en/installation/server-tags/_index.md | 2 +- .../en/upgrades/upgrades/ha-server-upgrade-helm/_index.md | 5 +---- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md index 0808d5a3988..31115d326cf 100644 --- a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md @@ -82,7 +82,7 @@ Fetch and render the `helm` charts on a system that has internet access. If you are installing Rancher with Rancher self-signed certificates you will need to install 'cert-manager' on your cluster. If you are installing your own certificates you may skip this section. -Fetch the latest `cert-manager` chart from your Helm catalog. +Fetch the latest `cert-manager` chart from the [official Helm catalog](https://github.com/helm/charts/tree/master/stable). ```plain helm fetch stable/cert-manager @@ -98,7 +98,7 @@ helm template ./cert-manager-.tgz --output-dir . \ #### Rancher -Install the Rancher chart repo. Replace `` with the repository that you're using ('latest' or 'stable'). +Install the Rancher chart repo. Replace `` with the [repository that you're using]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories) ('latest' or 'stable'). 
```plain helm repo add rancher- https://releases.rancher.com/server-charts/ diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md index 9a2b7830a7d..b5a65209d2d 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/_index.md @@ -39,7 +39,7 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve Rancher relies on [cert-manager](https://github.com/kubernetes/charts/tree/master/stable/cert-manager) from the Kubernetes Helm stable catalog to issue self-signed or LetsEncrypt certificates. -Install `cert-manager` from your Helm catalog. +Install `cert-manager` from the [official Helm catalog](https://github.com/helm/charts/tree/master/stable). ``` diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md index c46f16e76c6..0dff0b3d694 100644 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ b/content/rancher/v2.x/en/installation/server-tags/_index.md @@ -49,4 +49,4 @@ Instructions on when to make these configurations are available in [High Availab When installing Rancher in a high availability configuration, the latest version is used by default. However, if you want to install specific version of Rancher, you can set the `rancherImageTag` [option]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#advanced-options). This option sets the version of Rancher that's deployed when you install by Helm chart. ->**Note:** The versions of Rancher available in [chart repository](#rancher-chart-repositories) are different. \ No newline at end of file +>**Note:** When using this option, you should avoid installing older versions of Rancher using newer Helm charts. 
\ No newline at end of file diff --git a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md index 35ce584dbd5..9a81b95187e 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/_index.md @@ -81,10 +81,7 @@ rancher-latest/rancher 2018.10.1 v2.1.0 Install Rancher Serve helm upgrade rancher rancher-/rancher --set hostname=rancher.my.org ``` - > **Important:** - > - >- For any values listed from Step 2, you must use `--set key=value` to apply the same values to the helm chart. - >- During upgrade, you must use the Rancher chart repository that you used during initial install (either the `stable` or `latest` repository). For more information, see [Choosing a Version of Rancher: Rancher Chart Repositories]({{< baseurl >}}/rancher/v2.x/en/installation/server-tags/#rancher-chart-repositories). + > **Important:** For any values listed from Step 2, you must use `--set key=value` to apply the same values to the helm chart. ## Rolling Back From 6ab685d918a340141cbdc3f5e3b03b66de2339cf Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Thu, 18 Oct 2018 12:50:33 -0700 Subject: [PATCH 26/30] removing image tag section, adding repo options to chart-option section --- .../ha/helm-rancher/chart-options/_index.md | 11 +++++++++++ .../v2.x/en/installation/server-tags/_index.md | 13 +------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md index d94356b3115..bde16f2f6f9 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md @@ -37,6 +37,17 @@ weight: 276
+### Available Chart Repositories + +When installing or upgrading Rancher, you can pull one of two repositories: the `latest` Rancher chart repo or the `stable` chart repo. You can switch between these two repos by entering one of the following commands in the table below. Enter the command for the repository that you want to use _before_ installation or upgrade. + + +Repository | Repo Configuration Command | Description +-----------|-----|------------- +`latest` | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. +`stable` | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. + + ### API Audit Log Enabling the [API Audit Log](https://rancher.com/docs/rancher/v2.x/en/installation/api-auditing/). diff --git a/content/rancher/v2.x/en/installation/server-tags/_index.md b/content/rancher/v2.x/en/installation/server-tags/_index.md index 0dff0b3d694..1696ac5c70d 100644 --- a/content/rancher/v2.x/en/installation/server-tags/_index.md +++ b/content/rancher/v2.x/en/installation/server-tags/_index.md @@ -23,12 +23,7 @@ Rancher Server is distributed as a Docker image, which have tags attached to the ## High Availability Installs -When installing, upgrading, or rolling back Rancher Server in a [high availability configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha), you can choose what version of Rancher you want to use. - -The images available for installation are controlled by two factors: - -- The repository of Helm charts that you will configure (or have configured) during initial installation of Rancher Server. -- The `rancherImageTag` option, which you can set from the command line. 
+When installing, upgrading, or rolling back Rancher Server in a [high availability configuration]({{< baseurl >}}/rancher/v2.x/en/installation/ha), you can choose what repository from which to pull your Rancher images. ### Rancher Chart Repositories @@ -44,9 +39,3 @@ Instructions on when to make these configurations are available in [High Availab >**Important!** > >When _upgrading_ or _rolling back_ Rancher in a high availability configuration, you must use the same repository that you used during installation. - -### Chart Option: `rancherImageTag` - -When installing Rancher in a high availability configuration, the latest version is used by default. However, if you want to install specific version of Rancher, you can set the `rancherImageTag` [option]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/#advanced-options). This option sets the version of Rancher that's deployed when you install by Helm chart. - ->**Note:** When using this option, you should avoid installing older versions of Rancher using newer Helm charts. \ No newline at end of file From f650d0c8df2b420a6c0191db7ceb043f73a70724 Mon Sep 17 00:00:00 2001 From: Mark Bishop Date: Thu, 18 Oct 2018 13:00:43 -0700 Subject: [PATCH 27/30] removing repos from chart options --- .../ha/helm-rancher/chart-options/_index.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md index bde16f2f6f9..d94356b3115 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/_index.md @@ -37,17 +37,6 @@ weight: 276
-### Available Chart Repositories - -When installing or upgrading Rancher, you can pull one of two repositories: the `latest` Rancher chart repo or the `stable` chart repo. You can switch between these two repos by entering one of the following commands in the table below. Enter the command for the repository that you want to use _before_ installation or upgrade. - - -Repository | Repo Configuration Command | Description ------------|-----|------------- -`latest` | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. -`stable` | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. - - ### API Audit Log Enabling the [API Audit Log](https://rancher.com/docs/rancher/v2.x/en/installation/api-auditing/). 
From 147052d41f6882aeba4546687d39a73be47e6de5 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Tue, 2 Oct 2018 16:28:34 +0200 Subject: [PATCH 28/30] Reorganize HPA page based on latest releases --- .../horitzontal-pod-autoscaler/_index.md | 623 ++++++++++-------- src/img/rancher/horizontal-pod-autoscaler.jpg | Bin 0 -> 38147 bytes 2 files changed, 332 insertions(+), 291 deletions(-) create mode 100644 src/img/rancher/horizontal-pod-autoscaler.jpg diff --git a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md index 8926a33aeb5..8b6bd0d4c78 100644 --- a/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ b/content/rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md @@ -5,6 +5,8 @@ weight: 2300 Using the Kubernetes [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) feature (HPA), you can configure your cluster to automatically scale the services it's running up or down. +>**Note:** Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. + ### Why Use Horizontal Pod Autoscaler? Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. 
Factors that affect the number of pods include: @@ -20,11 +22,10 @@ HPA improves your services by: ### How HPA Works -![HPA Schema]({{< baseurl >}}/img/rancher/horizontal-pod-autoscaler.svg) +![HPA Schema]({{< baseurl >}}/img/rancher/horizontal-pod-autoscaler.jpg) HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: - Flag | Default | Description | ---------|----------|----------| `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. @@ -36,13 +37,13 @@ For full documentation on HPA, refer to the [Kubernetes Documentation](https://k ### Horizontal Pod Autoscaler API Objects -HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. +HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). ### kubectl Commands -You can create, manage, and delete HPAs using kubectl: +You can create, manage, and delete HPAs using kubectl: - Creating HPA @@ -98,113 +99,39 @@ Directive | Description `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more that 100Mi of memory.
-### Installation - -Before you can use HPA in your Kubernetes cluster, you must fulfill some requirements. - -#### Requirements - -Be sure that your Kubernetes cluster services are running with these flags at minimum: - -- kube-api: `requestheader-client-ca-file` -- kubelet: `read-only-port` at 10255 -- kube-controller: Optional, just needed if distinct values than default are required. - - - `horizontal-pod-autoscaler-downscale-delay: "5m0s"` - - `horizontal-pod-autoscaler-upscale-delay: "3m0s"` - - `horizontal-pod-autoscaler-sync-period: "30s"` - -For an RKE Kubernetes cluster definition, add this snippet in the `services` section. To add this snippet using the Rancher v2.0 UI, open the **Clusters** view and select **Ellipsis (...) > Edit** for the cluster in which you want to use HPA. Then, from **Cluster Options**, click **Edit as YAML**. Add the following snippet to the `services` section: - -``` -services: -... - kube-api: - extra_args: - requestheader-client-ca-file: "/etc/kubernetes/ssl/kube-ca.pem" - kube-controller: - extra_args: - horizontal-pod-autoscaler-downscale-delay: "5m0s" - horizontal-pod-autoscaler-upscale-delay: "1m0s" - horizontal-pod-autoscaler-sync-period: "30s" - kubelet: - extra_args: - read-only-port: 10255 -``` - -Once the Kubernetes cluster is configured and deployed, you can deploy metrics services. - ->**Note:** kubectl command samples in the sections that follow were tested in a cluster running Rancher v2.0.6 and Kubernetes v1.10.1. - #### Configuring HPA to Scale Using Resource Metrics -To create HPA resources based on resource metrics such as CPU and memory use, you need to deploy the `metrics-server` package in the `kube-system` namespace of your Kubernetes cluster. This deployment allows HPA to consume the `metrics.k8s.io` API. +Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. 
Run the following commands to check if metrics are available in your installation: ->**Prerequisite:** You must be running kubectl 1.8 or later. +``` +$ kubectl top nodes +NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% +node-controlplane 196m 9% 1623Mi 42% +node-etcd 80m 4% 1090Mi 28% +node-worker 64m 3% 1146Mi 29% +$ kubectl -n kube-system top pods +NAME CPU(cores) MEMORY(bytes) +canal-pgldr 18m 46Mi +canal-vhkgr 20m 45Mi +canal-x5q5v 17m 37Mi +canal-xknnz 20m 37Mi +kube-dns-7588d5b5f5-298j2 0m 22Mi +kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi +metrics-server-97bc649d5-jxrlt 0m 12Mi +$ kubectl -n kube-system logs -l k8s-app=metrics-server +I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true +I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 +I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version +I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 +I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink +I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) +I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ +I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 +``` -1. Connect to your Kubernetes cluster using kubectl. - -1. Clone the GitHub `metrics-server` repo: - ``` - # git clone https://github.com/kubernetes-incubator/metrics-server - ``` - -1. Install the `metrics-server` package. - ``` - # kubectl create -f metrics-server/deploy/1.8+/ - ``` - -1. 
Check that `metrics-server` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check the service pod for a status of `running`. Enter the following command: - ``` - # kubectl get pods -n kube-system - ``` - Then check for the status of `running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - metrics-server-6fbfb84cdd-t2fk9 1/1 Running 0 8h - ... - ``` - 1. Check the service logs for service availability. Enter the following command: - ``` - # kubectl -n kube-system logs metrics-server-6fbfb84cdd-t2fk9 - ``` - Then review the log to confirm that that the `metrics-server` package is running. - {{% accordion id="metrics-server-run-check" label="Metrics Server Log Output" %}} - I0723 08:09:56.193136 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:'' - I0723 08:09:56.193574 1 heapster.go:72] Metrics Server version v0.2.1 - I0723 08:09:56.194480 1 configs.go:61] Using Kubernetes client with master "https://10.43.0.1:443" and version - I0723 08:09:56.194501 1 configs.go:62] Using kubelet port 10255 - I0723 08:09:56.198612 1 heapster.go:128] Starting with Metric Sink - I0723 08:09:56.780114 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) - I0723 08:09:57.391518 1 heapster.go:101] Starting Heapster API server... - [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] listing is available at https:///swaggerapi - [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ - I0723 08:09:57.394080 1 serve.go:85] Serving securely on 0.0.0.0:443 - {{% /accordion %}} - - -1. Check that the metrics api is accessible from kubectl. - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. 
- ``` - # kubectl get --raw /apis/metrics.k8s.io/v1beta1 - ``` - If the the API is working correctly, you should receive output similar to the output below. - ``` - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} - ``` - - - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/metrics.k8s.io/v1beta1 - ``` - If the the API is working correctly, you should receive output similar to the output below. - ``` - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} - ``` +If you have created your cluster in Rancher v2.0.6 or before, please refer to [Manual installation](#manual-installation) #### Configuring HPA to Scale Using Custom Metrics (Prometheus) @@ -293,210 +220,136 @@ For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter]( {{% /accordion %}} -#### Assigning Additional Required Roles to Your HPA - -By default, HPA reads resource and custom metrics with the user `system:anonymous`. Assign `system:anonymous` the the `view-resource-metrics` and `view-custom-metrics` in the ClusterRole and ClusterRoleBindings manifests. These roles are used to access metrics. - -To do it, follow these steps: - -1. Configure kubectl to connect to your cluster. - -1. Copy the ClusterRole and ClusterRoleBinding manifest for the type of metrics you're using for your HPA. 
- {{% accordion id="cluster-role-resource-metrics" label="Resource Metrics: ApiGroups resource.metrics.k8s.io" %}} - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: view-resource-metrics - rules: - - apiGroups: - - metrics.k8s.io - resources: - - pods - - nodes - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: view-resource-metrics - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: view-resource-metrics - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:anonymous - {{% /accordion %}} -{{% accordion id="cluster-role-custom-resources" label="Custom Metrics: ApiGroups custom.metrics.k8s.io" %}} - - ``` - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: view-custom-metrics - rules: - - apiGroups: - - custom.metrics.k8s.io - resources: - - "*" - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: view-custom-metrics - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: view-custom-metrics - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:anonymous - ``` -{{% /accordion %}} -1. Create them in your cluster using one of the follow commands, depending on the metrics you're using. - ``` - # kubectl create -f - # kubectl create -f - ``` - ### Testing HPAs with a Service Deployment -For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. +For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. 1. Configure kubectl to connect to your Kubernetes cluster. 2. Copy the `hello-world` deployment manifest below. 
{{% accordion id="hello-world" label="Hello World Manifest" %}} - apiVersion: apps/v1beta2 - kind: Deployment - metadata: - labels: - app: hello-world - name: hello-world - namespace: default - spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - resources: - requests: - cpu: 500m - memory: 64Mi - ports: - - containerPort: 80 - protocol: TCP - restartPolicy: Always - --- - apiVersion: v1 - kind: Service - metadata: - name: hello-world - namespace: default - spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: hello-world +``` +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + labels: + app: hello-world + name: hello-world + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + resources: + requests: + cpu: 500m + memory: 64Mi + ports: + - containerPort: 80 + protocol: TCP + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + name: hello-world + namespace: default +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: hello-world +``` {{% /accordion %}} - - 1. Deploy it to your cluster. ``` # kubectl create -f ``` -1. 
Copy one of the HPAs below based on the metric type you're using: - {{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} - apiVersion: autoscaling/v2beta1 - kind: HorizontalPodAutoscaler - metadata: - name: hello-world - namespace: default - spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 1000Mi - {{% /accordion %}} - {{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} - apiVersion: autoscaling/v2beta1 - kind: HorizontalPodAutoscaler - metadata: - name: hello-world - namespace: default - spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi - - type: Pods - pods: - metricName: cpu_system - targetAverageValue: 20m - {{% /accordion %}} +1. 
Copy one of the HPAs below based on the metric type you're using: +{{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} +``` +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: hello-world + namespace: default +spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 1000Mi +``` +{{% /accordion %}} +{{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} +``` +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: hello-world + namespace: default +spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi + - type: Pods + pods: + metricName: cpu_system + targetAverageValue: 20m +``` +{{% /accordion %}} 1. View the HPA info and description. Confirm that metric data is shown. {{% accordion id="hpa-info-resource-metrics" label="Resource Metrics" %}} -1. Enter the following command. +1. Enter the following commands. ``` # kubectl get hpa - ``` - You should receive the output that follows: - ``` NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m - # kubectl describe hpa + # kubectl describe hpa Name: hello-world Namespace: default Labels: @@ -552,7 +405,7 @@ For HPA to work correctly, service deployments should have resources request def 1. Test that pod autoscaling works as intended.

**To Test Autoscaling Using Resource Metrics:**
{{% accordion id="observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}}
-Use your load testing tool to to scale up to two pods based on CPU Usage. 
+Use your load testing tool to scale up to two pods based on CPU Usage.

1. View your HPA.
   ```
   # kubectl get hpa
@@ -671,7 +524,7 @@ Use your load testing to to scale down to 1 pod when all metrics are below targe
       Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target
   ```
{{% /accordion %}}
-
+
**To Test Autoscaling Using Custom Metrics:** {{% accordion id="custom-observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} Use your load testing tool to upscale two pods based on CPU usage. @@ -855,6 +708,8 @@ Use your load testing tool to scale down to one pod when all metrics below targe ``` {{% /accordion %}} + + ### Conclusion Horizontal Pod Autoscaling is a great way to automate the number of pod you have deployed for maximum efficiency. You can use it to accommodate deployment scale to real service load and to meet service level agreements. @@ -863,4 +718,190 @@ By adjusting the `horizontal-pod-autoscaler-downscale-delay` and `horizontal-pod We've demonstrated how to setup an HPA based on custom metrics provided by Prometheus. We used the `cpu_system` metric as an example, but you can use other metrics that monitor service performance, like `http_request_number`, `http_response_time`, etc. ->**Note:**To facilitate HPA use, we are working to integrate metric-server as an addon on RKE cluster deployments. This feature is included in RKE v0.1.9-rc2 for testing, but is not officially supported as of yet. It would be supported at rke v0.1.9. \ No newline at end of file + +### Manual Installation + +>**Note:** This is only applicable to clusters created in versions before Rancher v2.0.7. + +Before you can use HPA in your Kubernetes cluster, you must fulfill some requirements. + +#### Requirements + +Be sure that your Kubernetes cluster services are running with these flags at minimum: + +- kube-api: `requestheader-client-ca-file` +- kubelet: `read-only-port` at 10255 +- kube-controller: Optional, just needed if distinct values than default are required. + + - `horizontal-pod-autoscaler-downscale-delay: "5m0s"` + - `horizontal-pod-autoscaler-upscale-delay: "3m0s"` + - `horizontal-pod-autoscaler-sync-period: "30s"` + +For an RKE Kubernetes cluster definition, add this snippet in the `services` section. 
To add this snippet using the Rancher v2.0 UI, open the **Clusters** view and select **Ellipsis (...) > Edit** for the cluster in which you want to use HPA. Then, from **Cluster Options**, click **Edit as YAML**. Add the following snippet to the `services` section: + +``` +services: +... + kube-api: + extra_args: + requestheader-client-ca-file: "/etc/kubernetes/ssl/kube-ca.pem" + kube-controller: + extra_args: + horizontal-pod-autoscaler-downscale-delay: "5m0s" + horizontal-pod-autoscaler-upscale-delay: "1m0s" + horizontal-pod-autoscaler-sync-period: "30s" + kubelet: + extra_args: + read-only-port: 10255 +``` + +Once the Kubernetes cluster is configured and deployed, you can deploy metrics services. + +>**Note:** kubectl command samples in the sections that follow were tested in a cluster running Rancher v2.0.6 and Kubernetes v1.10.1. + +#### Configuring HPA to Scale Using Resource Metrics + +To create HPA resources based on resource metrics such as CPU and memory use, you need to deploy the `metrics-server` package in the `kube-system` namespace of your Kubernetes cluster. This deployment allows HPA to consume the `metrics.k8s.io` API. + +>**Prerequisite:** You must be running kubectl 1.8 or later. + +1. Connect to your Kubernetes cluster using kubectl. + +1. Clone the GitHub `metrics-server` repo: + ``` + # git clone https://github.com/kubernetes-incubator/metrics-server + ``` + +1. Install the `metrics-server` package. + ``` + # kubectl create -f metrics-server/deploy/1.8+/ + ``` + +1. Check that `metrics-server` is running properly. Check the service pod and logs in the `kube-system` namespace. + + 1. Check the service pod for a status of `running`. Enter the following command: + ``` + # kubectl get pods -n kube-system + ``` + Then check for the status of `running`. + ``` + NAME READY STATUS RESTARTS AGE + ... + metrics-server-6fbfb84cdd-t2fk9 1/1 Running 0 8h + ... + ``` + 1. Check the service logs for service availability. 
Enter the following command:
+      ```
+      # kubectl -n kube-system logs metrics-server-6fbfb84cdd-t2fk9
+      ```
+      Then review the log to confirm that the `metrics-server` package is running.
+      {{% accordion id="metrics-server-run-check" label="Metrics Server Log Output" %}}
+      I0723 08:09:56.193136 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:''
+      I0723 08:09:56.193574 1 heapster.go:72] Metrics Server version v0.2.1
+      I0723 08:09:56.194480 1 configs.go:61] Using Kubernetes client with master "https://10.43.0.1:443" and version
+      I0723 08:09:56.194501 1 configs.go:62] Using kubelet port 10255
+      I0723 08:09:56.198612 1 heapster.go:128] Starting with Metric Sink
+      I0723 08:09:56.780114 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key)
+      I0723 08:09:57.391518 1 heapster.go:101] Starting Heapster API server...
+      [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] listing is available at https:///swaggerapi
+      [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/
+      I0723 08:09:57.394080 1 serve.go:85] Serving securely on 0.0.0.0:443
+      {{% /accordion %}}
+
+
+1. Check that the metrics api is accessible from kubectl.
+
+
+    - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path.
+    ```
+    # kubectl get --raw /k8s/clusters//apis/metrics.k8s.io/v1beta1
+    ```
+    If the API is working correctly, you should receive output similar to the output below. 
+ ``` + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} + ``` + + - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. + ``` + # kubectl get --raw /apis/metrics.k8s.io/v1beta1 + ``` + If the the API is working correctly, you should receive output similar to the output below. + ``` + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} + ``` + +#### Assigning Additional Required Roles to Your HPA + +By default, HPA reads resource and custom metrics with the user `system:anonymous`. Assign `system:anonymous` to `view-resource-metrics` and `view-custom-metrics` in the ClusterRole and ClusterRoleBindings manifests. These roles are used to access metrics. + +To do it, follow these steps: + +1. Configure kubectl to connect to your cluster. + +1. Copy the ClusterRole and ClusterRoleBinding manifest for the type of metrics you're using for your HPA. 
+ {{% accordion id="cluster-role-resource-metrics" label="Resource Metrics: ApiGroups resource.metrics.k8s.io" %}} + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: view-resource-metrics + rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: view-resource-metrics + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: view-resource-metrics + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:anonymous + {{% /accordion %}} +{{% accordion id="cluster-role-custom-resources" label="Custom Metrics: ApiGroups custom.metrics.k8s.io" %}} + + ``` + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: view-custom-metrics + rules: + - apiGroups: + - custom.metrics.k8s.io + resources: + - "*" + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: view-custom-metrics + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: view-custom-metrics + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:anonymous + ``` +{{% /accordion %}} +1. Create them in your cluster using one of the follow commands, depending on the metrics you're using. 
+ ``` + # kubectl create -f + # kubectl create -f + ``` + diff --git a/src/img/rancher/horizontal-pod-autoscaler.jpg b/src/img/rancher/horizontal-pod-autoscaler.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e39eae1bff88b600818c796117d9a29c949a30f4 GIT binary patch literal 38147 zcmeFZ1ymf%7B)J#%ituqd+^}y?(XhRaDuzLyL*7(5Zpbu1a}D#AS96Rh#fz<=gR-D z_tttnGkdDOt+jVm&CuP${lfh^09isrTm%3D0sw$KJOKC006_o*7&tgM7{mjCfPjF6 zf`^8BIFR7rVBmpBC@4TAAP^M;2NM7T3zP(8-RnOg~U*`f(W8I1iinPv$Y)>Xx6@%a%y;Vu()I4J2_z+c&Yz}(Bv zlpDH+25NpN{CHeML4Om4#@@%{en}Xu z_d^qmdHS7rMbo9{**63|oy9a++st6;hro|__(p{hYw?YJy>9@B_6t=v=h-0KTmo#F z%7nGd5Zmr=98gGisAy|3!-XH3fHCH_X-hh8e6DW@gvprt&b$UzgU>G#-x^@FGquYK zuI^ahaaAkjH`>nn!DeERisi;n_Y5ANeOK|#^4(fiz`Fj$0F(3+=CqM$(TQ&ehbrdyS#bmp;n`nbQO_qR$I>9RIY*N}+hfQwIDN)NdqGbbm zS&`Nhp@mPyVmVjKks!w8AM0=2lB8)NizN6Rhdl{v9jU6IKY;-ZJ6jdq9SeFtY%O07 zWTr_j4+}xO&ab@(0%=~j8?SD^|6%f)#zo>PF2F;kQB6({mqz&FlMX3{qS*s-*_k*azI>u=p(DWqnpaYMn`CH;4(YN5QX z<*)~FG6ud_W!QGS_vywr&TkUOROf zQt&L9^m&{YfP514fvmf}cRa|8`1r#E0J|tzDCMo{hxzX!AYFQ_2vp(%MT*t&tUISCvuTkGl-zmd}C3d=Y{gw=N7x zwK|-X@%dE;34-gD8^xQttqHG7G|udTUpar*+KScmne2BtX#XI??IcT?S62>W^mPz9 zYk@XHqVJQwI(yIt!IWplI@Ai?I-2x=- zhWVA@bP_*IJ1v*Hk#resMM**=b39^V|BEMMsbxFgAFcmQf4stZ1drAeKF#lU0w7vb zb(xRnFS;jx2mfscBvb7uiHqX~+xhn*=wpxBr`m04Xnzm?Z4Y}ks@bro+BaG5cOu`y znfp(|DPMAo$~ab$u+{3F2X^?4-`woI&Y#kWO9_kjKfBz^AtWsI$U{ME{*X@;>!?r| z&l7PrXGJACd*rOrL1YcP#&3C0+N05;IkCSpi2~s$GVWxnIReM`yYT+D1L-~%(5QePnZ59raYz0 zrS$YHjM~W6kydo=rNSirye{qm`W5v-MbDkACt1%x^x{AX3G8lU#{QeeBn+Rw4dHhT z_`?CM7aQ}PBP9Snl85i4F6xoWHV(`U)w_%T)C*oj8$ zfYjy#6s(qNvn}4d-Fg){eTqxB@fT#9!(#4Zxblke6(8WgNDx|iC3W>9b8xDx&?P;|c(1zD-H}YQL@;0Pvq@4@9D6y5+p#d;-@~ z5AaTSe5R7#0fq6panRmG2N!Jd@;S49g;lv13BD3#TM;I>1%>QtNuku9@$m417^ zm=rCFIP(qp!bFEjH4;-<2mIUX%*EiM)8wt0@{0Qsx>F@=Q$5w@WO=wi747$C=-uX& zN;~s&Wo^kNbbB-9zFbv|KrM~mHk~+~ED{@^@umFDB>Wo~(ko}L|M#2c-$?!ijD78& z{pkDODu2QLIb^zo=xIjqbD6*6f@GVqpXd1e9sB3_e>h~ezGrRb&jkiESzgAd%<*fg zLs%Q3e`}s=3IEdm9c&S1E4~ 
z#poWAQKoSi#vrpIx1t|O=k=4vV?MINn1cajO(!;R@ZB1@sIKn5?Agn5e?33@R7dF5a{=pvGV!}UZmz4_*V^wr zyp5T-ClGULU<<2C_G)1_e{TP`T#!hsrq`7vTj0Z3BsX2ClVEn-7O&z>%X}B{~P}# z1J55$5i%nK01%+y;ETJA zIS*$Kp&w2xfGWo6lB!aJgA?spgl0Sa99IV{(Goq&nf{5v?#ljE-9n@2Yha>d;Da4;APU`OX zN}R=3>R*Ze$sqDjQR2?*Y60aQz0|+uR1kcSE{tL&TbHGsL#eTmGxP0q-&>ukgAbn* zPj^0ks63c1Y;Rt9>z(w({mMml;c_@2k*3y=Bo7VqbU9?jkzmhMia zwl*%DYlY!&+O=Q5`$WAua`jI0b)lq(WAmF&G&)y@r`g#DI;XnTX>T&AG?z2S-yayf zuU2)sy9Y?TclhKvcX^=jX64l7l_X}ndu#FDN7dHkqus`0s^OwAw*otxoz^gK6=h~c z!Y~Jjd2v`r(yx&z&xwP-6I);HMuhvIZ4@Juz3Uf8LsiWD&EJm0POF7UY!@l2Qg))M zFfqgm!P>d3R)79SlV!GNW4%V^YD)w1Vm4~+ zN84I;>>T#<-VFDEUE>eLY6{opcX1s=-Z)fB$aArI>{EEPyAKIWIw*vHk&mS$m=tM4 zjxrQG0z;x*@>!a7WRQotl+cB|j~VPGwQ%F}?yQ$|H7`4Hu^Lz! zt>&T(oEof=m@H%*vb}23y9X@kcovTKjht3N4kMpr6kTlZe47&vBdkmf@Y7|q-~sN% zZfft}DktiPBW~^JHkxG0n~`BtTx5Uv9&4R6;s-y{Ga@|EYSF?d*MpgSuqNe8s|-Kz z;(++$j8``4;=Mz7qDG?M=ii-2xz^e34sa%5n$c^ed?RU@;` zn3+iZ6@wMp!^1SAPaQ2T8N4>Pm?<|*{pWqXiNq3!0s5az%}?L94o$%Wpp4EtcIY=2=iiEgp>;N zNJh3I9U;J8W-!BM1Twl9E~pLo%T&x5*qHFc%KrtULl9)YDdsE6Egj=T6o=&>$yD~-n|^Uz zsu|c*w)1+Thx!@qO9_R9_KRyW_H~w*O4NPLi{Mp3flQ)fxUb_JNU@pq3zLYmDeu}G z3A5W>8B2@1B41(56&6y;+yj)EXQ^9FNZofFEBc*jMj4wnaY0jxDbm7i$==Y(_mFE% zqsm8!co5teUA*;&#%eIgo_uSF#SPT}9onWUiJUp^XX2bN!4wtahDrYM8%Ha5}hzlcAb8O~l#Ho>~Bk|vWu#&?is@4&k<*eBpz-qc@ zQpvR4$Hx|Rgqc!5)%&9d&5PprV&^x!9j_EkZ%>cfo_l#0O}t;7cm4eFlXUjm)~fSQ zS0B@P*Icx>ZjY{5pLd+L4!oZ~w7Lg)t-3_Nt!A--4pXLFNJPXR5v^r{d4?o=W3xj# zt1M8Elp73RkkVdFST2y5AE2GPQ&wiKnD*WFJN0d$u_}T7BuXf1#`?;^%UDAe6wzMU zJ^{KypvaO;VR+I_xjLFGh$FpttKncygtGA(kX(V193`P=fRfcSBEYz|%uG`_#r?(O zVX|FThDBXQ3CXY))F|z|fNN5UREbvYl^0>>0foxXPK_$J#Tjd5=@PXRq?>2t@ie~= zXUw{f(L1~WVS|JY?OaZdI@UE*v{ba7Uxo^k;Ye&|Na*!L5VE$b<>(aVy@>+M+gX)} z3u9I{8Yr<0JQp3b&?8;`{-0UE$gw$IAe05RnkyvuzATlJcsvIgrz}4rj&1}aq4K_s z`{mRH*WCBarJ-=V7~-)?Z>SlaH5ffB%V{Q5R_}D0JQ@r(w_jonO0Wu7laQZ@C((Ub zp_LnxW)aV+!^k5@tB(`U3Op~cMb1!)ciPOKin6t56+d1Yx>@dJ?yL#ZR}m%gl4RYb zl(H1d-wMaokddmX@Kdy(jUi^FNw@hi;;B}f_@aFhA>ds`Jv*_%V$IX+zpeTOiP58h 
zWCfu}aTFF3v6)!n|D+ieV@aS(^u6dGF!4Wlb&J`>;gAHw5dQN!}W5FU8jydu*;bWHY`Q#X$P&?u3SsJ zp!u;nV(o8FU65&`>exgEpAd{e4PpSPvP2j#!%89jdL19doouT$dMAmb9zEd&%Z@*9^*R5WrW9wgbfwp|1SWD=8LRf20~)EG>9qPhwv1Qc}4EEz2ILo zJ9`hgf5LukFTlZG;q^-)rmAw~1*&F8AD1al?jQdcakbD6_)3mh(xcQe7*@gjFGX6h zw-|~i&BUhJA2Jn@SI6?JR}1%E(U3w+%a)rjYpX;8bnu`@YHR_pVi@+}G%Pf>Ij>|r> zW1wXb?vefA@QRyaKa?}AqI7fb`pV0#>dP41N{CW2Q!|T|(;4-J^5P6B7TFdq>TD4w zN7d!y5|S*@$reRIhS42G*=;r=90FWdW2Y12ja)27=#{6(^D|0Z?5nehJ~EgxXuU`B z(CldU>UlHwwmRjrv=IpcDUpOIm-xHq<4-?V?`m$#K%DPE9!z(8n>@&Ft?o4RlJC?bZQT^(3p3Jy)@h6&UIC>0(uA{Y@c z_Xqe_*?_^^ejNj&-zojdeXvBRdJsbw|$$58G3-Q1kQ7*3l!`DD*Y`>r51muS)#jX31)pZkgs&#PeRrRS(9lOb#8$ zTe=s6iUyIR4tDjN{hx&-WmPmw!Ms;MjWa{A6MS);+2~zE_m1QB9PY3l*s8wOpa9>H zKr;x>Cw=W}eDjCDR^6aWufFQlB}*AY#rP6p%2vR&jn`jgv=~xjE5;!bSNvsI**K5u zg&xMEz3W^&0QId5$rI>pbe&l|Y4w&+2TIMJUDTUeMVs%6uNwSnt^4y-DftNE49hKL(N z_oAHSyglh)9VkzK3gM@SQ`(^r=d>L)@S-_1a^@Jl-u1@PzJTwC-$ce3Vb*UxvUz{F$jr zLPFb{eEXYFPUvYVQ=oJPmDPr#DQr5MydA4{BCB0#-46_!Az%N&L#_}!93zGi&7gWr z9ifT9XkaQFBWm7@wZatJKjAaaU!L_27xamj#VeDQNh_t5vM5*-%>Jg!^O*0wo$W^; z?E{_Xg_rbNd3?#?h$0lTvmy@aA?=cNXrh)x+&1@GhD=oGD_625`2-SB;1Ak~)=ERD z_lQXIbbYPL<30bU)(Y_;_q;FN3RW?H_6ia*|JzxVH5bTFh|ll>MhZSkQNB+~NxE%% zy3b@Uc*X3IPj|3fz2>%-M^P#~i`7z!#H0X+jUA)g$$9udEfzMZ{q?$_-C^vibf zc>)9#BAG?qQTZ>3(k;;xE&`DLuelEzvb8P5ZQv?ev&Lh8MHG)Eg*=0#SWgP4Oxczj zlilhNTGjs%z$|3hkSv_B#YeCot?A~HPCo^lxt_9o5;GL6dX^)G8|#>(m5Bn2Lk5jH zx1^2X7*(B6B7`_h29zPG&tGwo-fR4-b!1NEOMhI=0mw%osx%gD+?t!yC5%US3Q7Y3 zf!Hv;Uzg!b#7zgT21~>pW5uyN(zY0^Ks>#!3Q9y#kJnK90Y1fgSu$_jzR{NMoyp^8 z{{%pwr^YS+)1z#9hK}I3yNrx(N`6FnOa-WbuNH7S*Gdawhs@kg=niaV^dsR-F zr__b|>{?EGRqSxYShxsZwGRjQ3o(1aZlQxj7>?7Sb0?f{f^|rtpb+=FPD0CifzdZ2 z0Q|!&dAqyB>w7?-D;e9fdw~1nuP$4p+!k!XmOZOPd|-|-9`Dzz2?u?oc{-)v5sq-F zW}DaJ^#ddtK%o8VZpxT-MX`|MTgZ9C)(Fsn&XDi58|!Oatakv&>67Gii0*Ry!Bv*{iUdAy6Z4Y)Gra(@Af;ZMu{E!C!wj!MTH38F8(v>t`LPuzpnR;<)|gsP%?x`I3F z!N!zUYzm*pU4FPeBidHYCUBT(u|Anr`|8>HY58JT#w;hBG4MoJ6*7-|$E_Pv=|~ka zU^el#5z1L-9DcWSv4+ftN 
z5tOYE22{lO^_jryGp3Chd0&tn1%wSK7596v{TTV8HlBuDBpKtJyFeWhVfS4zci_|> zN`N^7<1L*x)|l;clOHiQ2-&s;?fJckc$pPbH9cK_AU>fN(6p&gj3`FT#cN8JJ!1@RjlB0t@I-ui$ac&XeA;MW3{O>$gqCWV=`1_lNz9!jGEC;(ChVKf#Iy!lKkkWC@PJ4La4@we*YWoS+nsnE-u+)ia8{SP~EafgsphTGK(%G#}j&A3R%Zdz*Dc0E= zyTHAi9DPwp(EG$^rllgDvk1X&bA`W>GI0|d^%D_zCFyn3#S6K3`N&Q3b?w}eq9LJV zSR>J3(=P?A^$|}PeA7X?E_Dg6g~NUmmROkc7D7<4=2Ee=pKJu6$bw_My0uR^c+qvu&%&;v9;1zq?oNjHzKuV7YVns@YIF~vH#aZD=cj~t zNk_Or?KT+Pl+ZzlQ_L{!SAN`vU>Xrvl#_qjCWR9ICdRulp7@DX@6!YFJFggyPVR&s zbImIGE%{y0j8gfwNF2La8^SyLfj5ka&SCPFYcLMG@En?(I#fll`)U!^(2pXj3yr^~ zjB)@?TtdRcgw9&UBwG9=v434sI#G$FK)Ci{y{za<00TMYB`WD?K!Ni)nG(nckD^7o z=G3fZXlBxfuKhB=MezAZ+I6kuNg_rUl|tJ6NxVmUiIn2S<6V)yWzAQdsx-Ye%a>H) z90jgvPBFaur9S=34UJ5Weu}=FhQn0qIe|EPj~zEhSXyu8C9V06Va_W7N-^nA5w@eT z2*`5~3}@VlKUP+Qj9|`SLM365k|Q&ju7P1r7Fiy2fg|-W!Q!+ni-Xm!G)w5&a*1SzDaSSRsHQBjGX5%ISDBgqaVd7euRgR0@-?s&mA-R)MN+w3m4QO6c5?%cP z`M0Ev2?`oYn#z!C1#hgTr^XPT`sp{7nA*tG8kC%2o1q)zVT%`|LwAj%WP%GJk06r5 zWX$t$gHQ+=aVW&a0tv81ID&Dq(2_B}Ot~O=yxA+OT%|zLr{vT)pXYJ%@vLGt#-!PO zB3zw3hMW(?NivlDqw-e;ZAuC@Rl{XHnl+CRMaiv2S+i(PM2M*Gu#hB zTu|Qn(I9Ine+j@v7oc^GrDuopn!@{s`m%~ksjMMM{aojZRe#G+Hbu!`B4#o}IBkGz zA^ljP8eJeU!{0jkQz*!>_QQcCU8yHX2YW*mRkkA*2l{){M2g}CW6ImpG0i&Dyss&f zsgfYqsQT7YygrAX1$dJ%T)~#R$sJwlNXu&%qii}A>}nA%m`Yk~8ez;DvmQw#Ogq4M zbC5n!eRCP>J~8GZnki^DogWLAK%^7t5=xWf+Mu4VG2LFyOKUg@9WXJ8IW zj^8bPPTUw97aC-~=0sB-_M+XX-a%! zXEcq(D#<8RhONUijgBo^@Fsu8(H(#CDn?3(DWIPE)-9{`En)qE{Ge7&nmhX{0@>M8 zVHiXH>jt%!X$Bzh^uS&25S2%qU~8%Ozf$8TKa>00OA_0u_7 z(Fj?bSu2u6W4&W`H0=#P#1zYa_TZ;P$+(_16Nb{kTZHWRv2*mW$jm*UCKFRqi0K2) zGKtqNNXZNaI=utWUH2a9Gk?cOx*U~YuSUrWDvwv^8u;cYnp--O1@Ke#ZkU;7X9YLf zW+_2S?-M<>_Dd*2X3a^4hzI&uL|MHhRcGj;<6~{yWvQMoD4W9wIFS?%TSV6^mrfRp zY7=w7r7!iCa(! 
zl!SsBASSh^(__WUkh1J+)Vxx)l;?x_6|r0Ei_!dU(l`lp_SUZ;+feL5AH3Tn@2G6c z*HE9iUZ1PzsFrl+5>2h2pBOfHde^QyhK5q~o$4#iFm<`|F z1De{)MAr^bf*#x^BWfG8cOUN+9L%9B2nlHkk5uDBPe)4?7`((57wX0fofBKGLlPsF zU@b!@-q<8a{=E4Frbnp$ZA01%Ws+{Q7JWsl3)d`STBz%xg@Q+)z^x-B zLi1FW?g8mQ*MI{JZRh8(c;_&6(Eh3g?Jq9Q!!x*7ZRtwM>Vx#Ob4KCnv3;NU)6-3w zKrF#`w(_T7nlXW%K>nOnviF(8Qv96K5&hLwVErrc;<7-&3@*tC{Fu`v!7d39nGv82 z2BjHllmWwx_aihh!cE?D&ChWesS*Fw!(YWk1hZi2A3>KFui;sdca_be$T4qAP0?;T zrZoO?d3gAV`a5N&%4Zj~;d7p|Qb*cte=W3_ilhB-Ta2}~PfSYh=w-pkamgJ&$|tln z2KnIOS9y7{pQ>3`Mi-TZJp@+mwgZ5rqKDX~_kgrk@Z3CfBG0Y+$-ua1A96LrPST}C&iWCp3veyfYRImaK>N2X)rocC_VTzSN zT_&yS^ARz(-692nP;o9wc5tr4vRw3tiHygS36fDg=M?kAJbK4kOeTe_qU!l=j&uYo z60pim#Xcgr2-3)<+->L4e59RI;qGd;Za} z^zcM^Z$v&c#3_z*!O~&NyRr%g+c#YRgcSP@8(5_&YQ>9M;QT%~DU z=`<1{NK5XEF1y6HsZGpC91GN^54K3NEJ!5o7VLT*ja+Ni(yb{2o)DnB|KxMt!uD%gef*$yyGtldH>wV&_KZGIT+!K+jKfR@&DM_kYS zqzhOK*`&bPJl{fQLa-Vh>MN11H7(v|F^zl0Fe8AZ{^X2eVs7#E_xWmah%oK2zvGF5 zDA=N@9CK!>OCA@$^rlk-G&6525l$ByB~3u5kXVMUh>DctQ%rT*@NVyTRialk1G_e| zc~)o)nOc!j&}(0kn3R(JVq#nwS}SmL#LI1$!4Vj&Gvn!ZrXV-%_4QKRh+X0^;*M(s z^M2DVWLj2HWwA_Bmk{0cCeDLLG-s))`uII>7@eQ)xvTNNfRmUt93myM5C^laV|f&1 zsZmqXi;9pPA@92Aj#twVZmABLZF~T^A#KzWzbqz9u}ZqSTI~gc_BBv}zCp-&Owete*> zRdZaLY1x>Vpi|d{)^kv?RFklcJYq~jP@oAWzRnnR7}`N>x=1!kl9S|da=9^tM>~Dx51txC)GNO@s-zmI;~%XWpRRXegd4+%Z@a` z?I&++))$iuVJ{SBa&q0B}O8 zUMjp+1;M~xm$xp{IBYG}BM}Yn#Pp#Z4&#J(h7}j5?*Y}zO+QwbecnAl)9z{Rsynu5 z(5{Ow&sJ1#Q--458C?89sy;Cdi9Q}GIx0$lt^|(qdhk)x$jN5Y;u@osO*pd*|tVHGA4<4S9W%^JcaF@p6OzudF+c-IhdN)RFGL6ZoAj9EH&%%Z=2DA2VYi-0m-{+^oecCEkG})@NxdJeqFn0;?oXnhC0}rsq2n-+4(`Nj-|pG5u9& z1w*PyvN==PJ%ZAbos^V`UZ%EXp9`vn&A_f|h^jwZWi(_o%q4vOI&Kq(Y}%BF>2=c& z8+HZMO$nd2B0l92gzk=>n(jbm*b5~TB$#-vuLR}my2V}W5!f-WBh} zd1fEun06huJCHukDxVK{6~OcSf}q3}T+<}Nu#z2u0Sb0oV=7WLmV6-f$Wxi7{DjD}Jzl*?RcwNm zz(<}g`oi~sLHcF6(vWa2*^zNPwrU9L^4Ai!C2vFU-SDrtyP-5>X-g5zH3P&;&6O92 zcj#yc=R&qE#S^;tI?U|~j2tkb)pk#Hq@7OUaK!0aqjWVUBD&~b7MiM!8{tKfqmsUq z3y+9k+p-^3b;K5lXQEFO5@Fo(G$#qh%g;kz(&{LhQ>65djPO3aD^Q>w0s@2T)3k|* 
zWU--@3(ckbh6i})d(Pg*)782}3<~xUky3`66<8$A^1a5!SagT6W~BVs%SAd{t0|IN zb{8c~`y*Z*J>R+b5eq>Ll|wlz0qq&{%1AAd*Wv!b`lszsBPM6;&g-IO@ zRN|SA=BEi@uicDQ-y}}P2AibHouRlyDPkQaxs`fsw25FG$n>i2N1n&#KcT(34itQa zu{w3Zq%{%o7_zj~zkHNZ*R!SX@i>;XRHCY!g}OQY>>~^29_Cf!htn50S|~P1OrDYC z0enJ~FX^zhizlOOOKdx{C`6}AV~0hLkrS*PCGgmr33Q<2g55ADYg2;R_Um&4Y(u-W zde?1*2nMIkh|%0<110&hI~gF~mD%8Gno2eGnW3l4Lw76|BoOLy8PhUdufaG|GP#C{ z)T!WdvVBjO)dt07Tp6YE3cEdgVwR7ooP7B8E82BeKgGUrnq8&P zszOYDS)piyC}s(hSpDomS}I$#Tyv*D>0qD^5vwh-F}3*P#Mc_T>B|BCU~l}kfr=h| z-$}VjEHt*0aaab^KSq3f_FO(&X5x_PVDXcP7bkDYM6u}=_T};Ce4fng8PnHw|93gH}jqSwFw}6!i5{HXZVpOV<$44>y`U9AB?w1Av4KXZ-T|xm^VGe0ui2vFCCx zrjR%6a$9Ydq84(_MqU_kmcj>Nr}>W zrDtJLQ}RnxoWi1JB`4^(c@IPSdk9rX$f=i*Jl_tfb2RV_{hz4(-Z15g4Qef0x6NP* zUZUbZaQGW-WxBA=#;3zp!2^3*j{T}V)aP4HFn0A;$TxRKQLe)`#@<{uzH}Ioe)#yA z_7m8;@YMs6lce2KHG*63s@S)vHxh5oTfmz%SfM?=od>v#JsDr&J>^S=6X`;V9&g%< zs18!y>fcA%Fk2uz|1ghpo_IEV=ei22Mf(_>hnHi?1TCraiKggCJ)D)y39S!5#tez* z8Cp(Ds>wYdqT0*|mPV$_eTe`}Dl${kfM*bDfn5k73eHARgD43%Yy^94B6HI6#hxZ1 z+PMCFWlp|l8e%Wl4cIB3=-H*Znd~}&H3rPmw|l3JIgxVGcmUSei!{7rEN=4T)74b2?Atzm%i0nEYCp z?cp|(gA!5^aw0Ih0Iy`+^kHUb_cpk`R?0}k^l|n)CYaE5q`c?#4_7iJSvt0;mtBe2 zeKeq@d5rvf;gNm1Y`glwI?r+|bQgT@@PJ7m4x0{RNJ=)-F3Rblmoz2-V)ZVeR#4mq zO1-7(0Y{cg9|n4(k7!C2b9#`Dd=G!lB&}y8p&1}EPD68zD20zDv z7n0fZrHvyofZq=DWZbWbUIHBtY8HyEqh%ff$c~!1n+Yr;sqwr24iivXW)+=T8V006YP?@f(8A|hOtoJai*^O zvE>Mbg55xvKOqJ;5XF6utuu82LFy~Y1D;F_fV!YWaOEwn77KZchKk(Dy`Nn~05?_; zJ1n)QH#FSxZf*ZCq zcIuP8vP!u30MZ=erSs1q?mf17`Yr%}W<4TDGt!s;Y#D52F?L|85w?q8 zK!%VI<1phM_amJl`t4`ZpN`rhVkbw5HQ7jcAlZxUE_1VLZYe^{O-#|W8Cadskl>hd z4@U*&U|AN)nePF%{7s6e)FCywXT3dcQP{aOB?#fAQ&WfCF%_Ut9e!N=o^$wvoCB07 zStk5lGD5F`35-G|g)j&+$lQSu28-Y)o=GloK!RYqGXqe7zdrQgh#^^Zu@EPuFDK}P zAH~9BvuPe$pWILKFq>PjJn_Ou8U9y#U@68Uq(bYHh-P{0$#!pGQG_FW9rHHYD5C3! 
zVXk8N+H`%vH`v^}Auf?)k+qX}+w6rZ1-rm6Q7IB{Lpo23Gz{jgCs{f&L<6wx!5O94 z-F!glD33{fQv$A!+BRFuyr>4px>6yzMRI5ap|Lj*&SSw{>>=B^W*sLt!}Pc_^EhA< zjNaFEUGY{FSCmaW{0y%|tY%-(Zx8^MS~CsJBn$G|zt47bcOnf)A@j)32J(4GKKue``xH|;FM)z=3YRz)dVynscPPk=ua1a~cYq`ERPRn@ zf@PAuBf_-4*Ta-d=Fsdw37^Vajt%#DcewAdHRl%H;KP4MMY`d9cfIT>KZe0Zchm>A zoj40^bn%gbaVG0xf&^nEz#uRe?~>4+Xs!e^M92px3ECG&kRN)zn^pH?CSyh);@&3+ zW*@7L#b<(9TNoG{@yy{(f>68(saQ>y?~zMan0ezj&-!`pbsJAL4}d)z}qC$ zxf~*H{FoNfc@M}ZK2Z86=DvDFgLNZ{?BG9kG*$rtmjxY)CxeMcgh?`gKNeO2nsO)e zZ!}I|N{G=i4N_P>5XKNxU_=OSYELrMH9XFrXuP~>QV9a&^2a;_Y*&Da5Z_F!>o0PS z_cj4`c=ciq#@OG2z@t%)K)(6z?l>IifQKcVqLb%4X(u%$rJ`?!Q+K$7k(BZjua}db z3ByB!_Yg$bUj%$f9lsMAAF%KBUVC!n!`YUQkKbmmrc+3~+bf@};TkF!$`iM$GR?d>uVbXK$KlqB>s zq={J9H%k@CP9>%_k@o}Bg~$Tk;3AqtEjWmILZ4Yb7M?b=rb~Nt zdes|42}fl%+(!YRqky1zM!$0b!!TF~Z14=eoVlm^&tCP%atPDdF95awu6#Mq4lRTfPktY9L5 ziJ~`WfFlKDq(Zn~(;ciW2TTnW2x3%`o}va?mk=KW4JYn9p!cK{rUS%1oQSe7#<1Dx z7Ci99LQ5e$Rtvr~N-vyXod<=sDR`*xEm=1Zl>nYes8)hv-@Pi`GR^CqK{=p^3mbz0 zl$^mWz>H5svIcAU1-+9tP_2(RS-!Ou2yn-p{Ylsn9Q8lEFznF=GKrwpt!%kNTpFiD6l zXx+q#MhOf064K8+AC%R5Meaoe?BuhEL>?VusS3o-5iK!FJOW?>^yyo7k9 z$lCy97~ydm%Px64+w_=}PIy3#H8xe73)lCVxMI>m%!4(gy!A`U);rm0K=#AaSoSR^ z=X0wCb%NRv+#g;3Z8!W8313DAcChv@DF!8uqk~&iZK$@^S!#djVR1csQoj79E5vWS zI$^(c@*C)f#Bu52B&w%|X2$op)+fqwqySDrdS`I0lwM>E8%*DdGv0<>0NG70yo>Dk z+>YpUzof{(Gz+mW-RqtHa=NqRYhE^yuV2x=5noX$%8TD2S*qX~fvHP{;Gi)$YLW4kaoL z&rsi$QoW4slpQZ9o8txH<%la+JB6q4^P?x8%u%LS_94iK=SSMpy9XeXG_^bDTlw`E zrB>n3X+*(z#9-Nw?9IM#nLpo zzyb>_X~{uda*mRO70Hq_NS2&4D2RcKOAZ2(C1=SXAUO$0mLxf;2qH;DK;SJHzXgWKF7p>hirb8gnslMT)Yp3^KNr$;HzW~ zR5ii1{u5<|oINu3dAb6CQ`e2JFnW*hnrD6N3KkJ{1p%NCi1<;)JwUCi`OyWO4>`#} zVX`+ab9>iWPhJ~_LPbOc8i!@PTOYPT+2;YfSiGOb_xnC$*Q4U8fw0gYZ&&m^cRBG& z*c6bE;p;0@pF?F`Bw5i`hI~|a)R)bJlLlzF9ZluK-!7r=prwWDTt!S2nQH+k#|M-G z`e&N_E}G3Wb1nz=3LOFgsFCPC_|LsK$sgchq*~j{MADP}{pI z>|{*RV&TuHGQVK#4aexba0H~T=yY7XM~yj8fSk2b%bvxD8gLc#UL7b@fie!CA<*vt zhv2y@q<3~g<>R>d$>brb37i-`+$s?w@pQ(10~t-Sy(I*+07wwZHt3-rZ#$%~O})Yx 
zOE9$Nv;y0AT;6r5mqqWy%`sh5P@|H>6=F{GFn3LlsPnZWY#S~U1vrB$C*nmuh+b#n zt8C_1YO%Ql(D$YqkpIM4P0V1QDvJ`jLNjxPX6!=}+5nFtyJ`5Vo$5fQf!zJM4KSW0 z=pDqKJ>HVk9B?IrK2L?-{9y7)vW!OI?YTuXdBM7ZCuH~$#JWUL;S)4TLEh*ARt0Rd zGi-*@AC>(E@Y6#mB@*eomMjV2o37&xmrRjOU=c16zC>O5!4NEog%m>|dTt#db_BBp zEm*gOhPY<{tE8^hyJQ({-j8Ghe$@+|ejl58Zed2b?J^k2_ycnMnEk1LC!KrVGEwg1sz1zG1Qx?i(lG$GxqS*PhOzuqrBCsDap>QaI|ei&rbB-oyX&yCi*L}%nLTfQEz48- ztGHtTVu9J&ad^;w?d<9V+OPX5ZO`+3(TdtnZNCH84$hY!10HN6ZUN69N5wy@7ruGE zU_bh2B|m_pV?aHkzORV?*}Cl$;$6?`bwrFPK-7l)P0r(EKzRMT@MFMH)=$Ad0EC>)yO<8S@)b!Rc@?lIt-OAel}tSfx? zhD$%psUNoI`SH`s+2^zW_x*6*Q{R1Y9(Cf7?+M|Mv*Y(pIV=C^ne(VW={vK9+x^!N z-u-;g&v&B0(RRPUy&tN;Hj}nzPZV22xa%kKC$QNCDrdkU2Ow|aKVe-9ns;>$wgf62Jjc;-w5%T{>Z zf5}+b-{}9yz+(U(_$6AL-&cW0IUz{sXnAf6~9 z5ufHj!nyyl8Gn#5{s8{V?)HPM`v>rA@Ay|~TjBph#*9C#2oXh3yBhRchx=E>&ImfU zzJ-dDoXPbJ|8ELxKwCgipKl36;}6UPar#hl27sZ#lu#cHrT?B084UG)N>nBa_XI#= z)c#GqGeequL9``6FwzTkU6r{U5a8zqQnV-=_aRQz9PB zXTb*`Blg!pJ6X+edLx2(Fdv`G2#?As4KV?rtg^xB9a-A!jQ9SrPn#q&`twWB(J4`z?yS%X8bD=I!&JBfdV4>~F%78DnI4)Ostoh#MyqG>`_+f@5O?9Hsl@6Gmq0WuhQ zVVGHVcdJ&tj{&%mZ+PZ3SZt=B=}{P<^3teoQTp^{CSQ*QW#k~|=^Cun&P{CauwO}+ zP?0juT}7*02^3`_Z8zBD#m70z9zyi)p!x{#B_PqI3#nN~i4Z3b20)Rc`QXKs(5V_jz8>G-aZ965MMF@RfYF`qokC43|5QNa2U zb{_A5B`s-LJFD58zZx}(FBk2@Z+dc^7|X<}!xsC}tn9UImF{=I z1+nh)+uk9}Cm{+6?KDK4uD3b~QHTh|$(2?LMT97)K@soB4X7HP;ty*zD+7%R}rH7E+9XRQf&-MW5u@k?T`4%hG{RyEQm)|i-`hW6I=YNWxv?%PuC#7m7>;t zM~UeTEGK#^MVFaHHG8RD{X8;eGh`$5Jds+rnA%ZlUl$=Nl8EweGCfn4k;H|8{l}WG z?&(7&3#=O01MNmCMpFY8xXJwxI}p6{pg_03yG0;8_YtWk%7SAxB+Hh`j#_52m#0yx zNDnb-6}YYBC^T8d5z*NIr#VogshvKN^Ry=Hvs}^kC#!~s*HC7*$W?Z+%lY=;#tJXX zqgfx-K7i|%K3fRTdjaz?5(t=YEU2XHO~RsH9oi3d@noz4%`WX=i{D<3oz_)o?voo6 z!taxow^w&AgLIno1_!T~Crzg7-m>FIq3qCnK=j1lVqW{pUx2wN9nT~|x@VfN9L*X( z#B+_}Sk&(RAB9V#oZBGq4 ztz1m_?rwb3@9}Oq)n{ryWJXfw#qJId3v8{v>|BqrsS>S^xCmOeVgT?%1<|R_7MdbKG2li)Cmvja8Xx2*M)e)wsh#*jCV2WkcPDg1hEJw@D%sAl`Q&B`zxmPALNbDd zuF)|{ZRC>^N^KVdI)knr&I{YkrB$1tlfXESz&CY`4k80Vcu*1VZzty;L4h3)@zMko 
zWMmA~pQ|wO0aRRwMVK3SoF_{#(@Hm?BF%&I%8sdCKUZNQ0>j!B{$oH;h{mkx1Z6aY z?(^*f^Tr2<2fORDXQzF&x2won(uVa7&j`(&3c*;MN|X10B|%(`kuFQ8!fEkC`|GU5 z?Wf~KwXVvd&D!*z$@$KrdXZvW9z(*3-F5VsOjwHO@0R-^hI( z5*8U)6~y>t0-bHNE=>E$kMYdrZtEXSF3)EX1{+ax#)-zh>o*!jt!-lX|+-3#lv+xVCDUOmX@bvtm#xW9T4 zxALvf4U|LmL2D2h7*Y>GzCmzx!*n$9-pI2IJ8S%_SWG?!`?NZm>e6B9Y6=O9UWmOj zgQQi4X63#~+0W3&=hebkls1oKCY67T+PPWeGStJAg#k_B9Q;ZeClLu_f;P!UpcDJl ziYAuy$WJivP)kpG77W(Lm0Kn}QrDP?X-gT9<)RWl zhUja>dJ8*tF!N~|JVX)OtV&daR7qbz>nJcGvb){Fh-?&mWTW51%+NImk+Bj;Ww-D3 z8#Quw-sCbUU;kA3&Eo|P;lnxP2l1@s1cF1)_nNIG?SVW)j7JgkRM$lVr}6VsqKkOuCwd$M*yQO#a!clNz4H6wG3n}1(+DpTnEiMoMkK&9q26Nl{g3{4BiXiqoR3jN0FB8_R-~szYr=wv zS=NQs)5)Y+s)goC3_54eCZVcQNKd-rQetSn3(?(Z5KPH>Kwdk6j1Gz#b7!K^y;EJw z^(wYmCAq#4R#-udbGZS2>-$gkSpd=|FM6r_m?)tXpnOhdk*Z|Iu&6Ym+54!Y{#1@X~<<#+H^ezYzsg3i>=A*AyClv)l~zB8GLEW5Vq`gHLM8a%#ql=Vb`R`|dn@;<8u`ZFPJErbnTSBCz6p^hN9OQQ-Mez3 zlpV&%r(TMTMik-2AfyVW-mRei`t(;O)hh!-Xi;YLuKPm}W!E_)YF_%x-h1C<{Yr@t ze-H5}GzzHRjHKQ$8+b|n0q0q=Y z7D!;?&ehn6b7$$udd{s)rcH*##-_UJm$uJYvL5g#0pPVVV0mp)qDL83j6Ooznn~Ky zx9Rc{op4vc^n2k|%X%z;huGbjqRys5YZ0|-%Sb*rBB(NpnO?^TuMq80(^&#=Rz}g% zzw}9a`ruVz9QG%#GGJ<*)~E$qhN_s$5p#y<6;q~wR{8@@JGJ>WNGukaMMVf^A18_MW5T?ght_(|x&G<@0XlSiV zP-Fgt;i?a$$BhWp7^O7#*~69qhHRGr+aZFiB*y9IF}Wk&9K>EA5Hn79v$CCPjjFlO z7;L#i1aQyRF+opFH;Ko7Gds8o^-Elum55f|dvk*~830g}(9VWBg_^j$1Z3WX9;^3b zWEe&*dD$eF@2r&Y&UH*?jL94-6sc0qjX$7{^4dTivsg7~r@pHfX;U*)(XT#5S`cRfp$ch5The87ym{ z+ZFaUdZ$#gIG%=zm0>lJKi#NH00ld-jPxF zPAJGi!51N4RfA<@thz=cAfWd56;vXPO9`fa(= zgk=5*oLw!WD45i!=jbihLdpoia;am!alIELKB^R;wqX5Il-7HR=a+>=wI}C4 zyCLDCxT~5LPQ6KQG6}yoBoA3b3mBQg1RFNtc{VRq5pm>@>&^GF`|@1#frq5F>wBTy z%_XOem8*uV7lvh!+Yq=om1$?{xG2Vh=>qL_Ol1LhQVVKOG>uaEtESIxn<9U3k-Kn*{wy-~C&OR>!z;axBC7*xSNV<-@oXC+ojLQ0+>=Q^%Tizz~ ze*kpnk~d@+xs%ui@Hn9A_n?vT2~6psk!-Tm`KqBJL4&MuHAv7~l9AcB*O|x(B}Z+T zFLI_9vrNbTo8Xf=GlZy`#X{B+Oyj8VHW*)6r-Y&4KUU;LYZon5wpFJf`VRnhICH>@ z7;%&CYV_~V=rgg?tZz9i_%S z96^G(HMPMP`G{XWJDmoSPISAfwtgfY=J!@e{gczm6VJRFFM2dcJ*WoZ|KT9QD8oR1yDjTKxw>{0q%xSif1(E 
z+6?7<_28G-K=ZTvf-mVO?Z%>&SFw@ev-G%l_>1dZ2Ln1I|A*n?wiD^<5K98H+f3y+ z4<27QGeJ>*1xwo^O$lJyK6$IlJX18Zn#zIZgTzMVDlH5mYQF8n~3pTw3R~`*ue(3Cb$c?p}V4&n705 z+ugj*HoEBEe&kTHhlPQlO;sZ-u5`R7q&djlZ}@#}pNfP?Z#&7EHdzm9N)Ks~3AF50 zatBXVg}+a8Vsql$EO;UNOBa!Da*zHz@EAnKZ?|kbWo;t;7U$?R{aK^ zT$FymW?&FNnh3yWp}UAJ95D~pOVkZzA8+sAex|!eI663>#o&`~Cz_txgC}6 zdEud^vfet2SNmZqmE3NJRf;R0?9&3$~O$bwmDcgPeM8y}CZijb-5VIwLXCYCgn=c#jO0$CC-onzIOzM&XJoQ>M ziik~1DOF|+G!VotP{GOoB!l^=rH;}aTU;#aA##kf$dIzJWpJ2wl-Yv8ns+ku$FPM0 zeT`8{h3i9-t#C`DN`N{EpI!qC25M>xgZrWO@9WS1xNNBxp`eu= zL#+=PmZ7C7+{gwgauRP$ibJCKR0Kh7~|QhK#~UU+i5smkWsNW5sQ=$3X>6csuJg#;SGfv zgXhSa+)i}N6^T1oYO7Q+0i^6n<}4do6;<~ySo0NogoqGKKhyyUkW+DHO4Gq>*w?h6 z5$}oHd(0>Yc-rzr$aP3DV-LIOui~4JGBWsn`hv8T3PDaq2Jj%yYW}vh3g6imFx>Ls zS2_EXT0w;nX$7mwd8X{VL~MnISUcE@m=k?llnw84PyBn?~lR)SLZY#_YJNkbxoD0d;8piTg)-Wr25JR0NY_fskdnw>xK@_o&||mPu|P%MKnn z{tyKnIW97h01%e~PG0Ttp!dQ$A^Ijak+&TcgU-eImwb7fvm^K1c5Yrcyz#(dwD;CsaGWB&u@vkGO+EODa59m&*gDUR&jF2b$6WD`g4xg~zO^sBWEwhF&9 zhPPcr^-fQ`ulixK6RzOG#aq8ey9QyjbmHh)tV*)IuOf)`f0b@}N)EI8Zxp{Rp z+TjT0(YjQ9MQht`kxJti9Hn~l&#!O^W@@C40drS`KiN#m!<0PUP#goimJSAszccN4 zD|3(MK1lArA(8@&&9H4_w9=I`v5v23#xp8`EIwLB5ha|1sF)g>AF`F(A0_i{rJ-*a zr8p6M+n% z-z)w32f}N)s!8$v{<+i~7>F-V%M-As4qid5fld`*1txIiB1lzhs z9l~g@mPm~bPwf^r^{bc^>L*$j2|FtixqU7{v!|K? 
z451C_kYNI3%MQWm;zNe7u;W3wQkkc22sjSObHPO@#5KniID5$`fUgT0!eUr*F|@&7 z2$LS5Z^4yG9=627$BMB+YT(I;_Z}wCS8Wp?_L530`LIiG@MxG1c&nf$+Q=5gUO*Q$ zRY+fxdr@!T0@4m~2pI$92FXtWBOIivj8N>U-(olQQc*#Qi^sZlGYlq-7VeHQ&OUu$ zBQsQyOwaJ)&Y3AYx!kU{G_GDOR(XIQ-V^wkPDLdW#s|xx+C=EX8 z@@5XoDiyI7>%PYPll)t9x7;l2=0bTm#y4{cq!8byN3@?Kc6mP=j>-`)Zwx;ytY~{j zRIG7Mu~jn^cBT$WNp*hL6iL#kMCHgWs{t=y7o3E5D{t0M1}INalS}z9f=LZoCRlj* zgIK4)Blu!ahl>Gs!0`PlO{Ch|yDIPUkT0~#qKW|DrQBP7x#geX)eC;D0^mQ)U;v0xei%b@)*S59;h5BeddSALIh;O$Zh!}(@hB}?-$?%LFtp-!`( zL0QNv`vAG9>Da|XDgkN#ZuPO zHbr4D=S_$Z1$Dm&s_+i7Nq9_UD`kKR3c<8eiop@UL)r%3Oxg(4oWItEJlHqx+ z;GB0yz3ULSGe)G8iehlvUa&$EZt6iRJS5epx=WU(2aRTo`Y- z48T>b2~G`rYZ-zHvdiC+%nXS3Sb=>7U?Df-^TZw{SmV>O@O+N8S24a)7D?>ufyVVM1L*97HC7e^Iut!A(_uGjO9F!p${p zT@gqRl&i<&X5D@DK9tt|S-O;D6I-r@ZK literal 0 HcmV?d00001 From 576776fcd8f9115c5fb63074359047e5f9c69745 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Steenis Date: Wed, 17 Oct 2018 16:35:16 +0200 Subject: [PATCH 29/30] Cleanup helm/tiller + added verification steps for helm/tiller --- .../install-rancher/_index.md | 2 +- .../en/installation/ha/helm-init/_index.md | 29 ++++++++++++++++++- .../ha/helm-init/troubleshooting/_index.md | 2 +- .../installation/ha/kubernetes-rke/_index.md | 2 +- 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md index 31115d326cf..27e7ecb2023 100644 --- a/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md +++ b/content/rancher/v2.x/en/installation/air-gap-installation/install-rancher/_index.md @@ -64,7 +64,7 @@ Instead of installing the `tiller` agent on the cluster, render the installs on ### Initialize Helm Locally -Skip the [Initialize Helm (Install Tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/#helm-init) and initialize `helm` locally on a system that has internet access. 
+Skip the [Initialize Helm (Install Tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) and initialize `helm` locally on a system that has internet access. ```plain helm init -c diff --git a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md b/content/rancher/v2.x/en/installation/ha/helm-init/_index.md index 5fc3a6fcf6a..48d880bfb14 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-init/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-init/_index.md @@ -3,17 +3,26 @@ title: 3 - Initialize Helm (Install tiller) weight: 195 --- +<<<<<<< HEAD Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). +======= +Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. +>>>>>>> Cleanup helm/tiller + added verification steps for helm/tiller > **Note:** For systems without direct internet access see [Helm - Air Gap]({{< baseurl >}}/rancher/v2.x/en/installation/air-gap-installation/install-rancher/#helm) for install details. -### Initialize Helm on the Cluster +### Install Tiller on the Cluster Helm installs the `tiller` service on your cluster to manage charts. Since RKE enables RBAC by default we will need to use `kubectl` to create a `serviceaccount` and `clusterrolebinding` so `tiller` has permission to deploy to the cluster. 
* Create the `ServiceAccount` in the `kube-system` namespace. +<<<<<<< HEAD * Create the `ClusterRoleBinding` to give the `tiller` service account access to the cluster. * Finally use `helm` to initialize the `tiller` service +======= +* Create the `ClusterRoleBinding` to give the `tiller` account access to the cluster. +* Finally use `helm` to install the `tiller` service +>>>>>>> Cleanup helm/tiller + added verification steps for helm/tiller ```plain kubectl -n kube-system create serviceaccount tiller @@ -27,6 +36,24 @@ helm init --service-account tiller > **Note:** This `tiller` install has full cluster access, which should be acceptable if the cluster is dedicated to Rancher server. Check out the [helm docs](https://docs.helm.sh/using_helm/#role-based-access-control) for restricting `tiller` access to suit your security requirements. +### Test your Tiller installation + +Run the following command to verify the installation of `tiller` on your cluster: + +``` +kubectl -n kube-system rollout status deploy/tiller-deploy +Waiting for deployment "tiller-deploy" rollout to finish: 0 of 1 updated replicas are available... +deployment "tiller-deploy" successfully rolled out +``` + +And run the following command to validate Helm can talk to the `tiller` service: + +``` +helm version +Client: &version.Version{SemVer:"v2.11.0", GitCommit:"2e55dbe1fdb5fdb96b75ff144a339489417b146b", GitTreeState:"clean"} +Server: &version.Version{SemVer:"v2.11.0", GitCommit:"2e55dbe1fdb5fdb96b75ff144a339489417b146b", GitTreeState:"clean"} +``` + ### Issues or errors? See the [Troubleshooting]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/) page. 
diff --git a/content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md b/content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md index b050fb6b003..c73013b5cb8 100644 --- a/content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md +++ b/content/rancher/v2.x/en/installation/ha/helm-init/troubleshooting/_index.md @@ -20,4 +20,4 @@ helm version --server Error: could not find tiller ``` -When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm on the cluster]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/#initialize-helm-on-the-cluster) to install `tiller` with the correct `ServiceAccount`. +When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) to install `tiller` with the correct `ServiceAccount`. diff --git a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md b/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md index b1b30c8b728..851052f68c3 100644 --- a/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md +++ b/content/rancher/v2.x/en/installation/ha/kubernetes-rke/_index.md @@ -118,4 +118,4 @@ Save a copy of the `kube_config_rancher-cluster.yml` and `rancher-cluster.yml` f See the [Troubleshooting]({{< baseurl >}}/rancher/v2.x/en/installation/ha/kubernetes-rke/troubleshooting/) page. 
-### [Next: Initialize Helm]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) +### [Next: Initialize Helm (Install tiller)]({{< baseurl >}}/rancher/v2.x/en/installation/ha/helm-init/) From 1b8ff134ef9010200acc666e843368bf41413c45 Mon Sep 17 00:00:00 2001 From: Denise Schannon Date: Thu, 18 Oct 2018 13:38:31 -0700 Subject: [PATCH 30/30] hiding upgrade for rke-add-on install --- content/rancher/v2.x/en/upgrades/upgrades/_index.md | 6 +----- .../upgrades/upgrades/migrating-from-rke-add-on/_index.md | 8 ++++++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/content/rancher/v2.x/en/upgrades/upgrades/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/_index.md index 6c3db9da3d9..109c96c1bfa 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/_index.md @@ -15,12 +15,8 @@ This section contains information about how to upgrade your Rancher server to a - [Upgrade a Air Gap HA Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm-airgap/) - [Migrating from an RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) -### Upgrading an RKE Add-on Install - > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > >Please use the Rancher helm chart to install HA Rancher. For details, see the [HA Install - Installation Outline]({{< baseurl >}}/rancher/v2.x/en/installation/ha/#installation-outline). > ->If you are currently using the RKE add-on install method, see [Migrating from an HA RKE Add-on Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
- -- [Upgrading a High Availability Install - RKE Add-On Install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/) +>If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{< baseurl >}}/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md index 5bf1119c27a..1ee9b871021 100644 --- a/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md +++ b/content/rancher/v2.x/en/upgrades/upgrades/migrating-from-rke-add-on/_index.md @@ -1,8 +1,16 @@ --- title: Migrating from an HA RKE Add-on Install weight: 1030 +aliases: + - /rancher/v2.x/en/upgrades/ha-server-upgrade/ + - /rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade/ --- +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>If you are currently using the RKE add-on install method, please follow these directions to migrate to the Helm install. + + The following instructions will help guide you through migrating from the RKE Add-on install to managing Rancher with the Helm package manager. You will need the to have [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) installed and `kube_config_rancher-cluster.yml` credentials file generated by RKE.