diff --git a/config.toml b/config.toml index d83fb46671b..6ea4f5e3a0b 100644 --- a/config.toml +++ b/config.toml @@ -5,7 +5,7 @@ title = "Rancher Labs" theme = "rancher-website-theme" themesDir = "node_modules" pluralizeListTitles = false -timeout = 60000 +timeout = 80000 enableRobotsTXT = true pygmentsCodeFences = true diff --git a/content/k3s/latest/en/networking/_index.md b/content/k3s/latest/en/networking/_index.md index 549096266eb..95cc646e57a 100644 --- a/content/k3s/latest/en/networking/_index.md +++ b/content/k3s/latest/en/networking/_index.md @@ -36,6 +36,10 @@ Traefik can be configured by editing the `traefik.yaml` file. To prevent k3s fro To disable it, start each server with the `--disable traefik` option. +If Traefik is not disabled, K3s versions 1.20 and earlier will install Traefik v1, while K3s versions 1.21 and later will install Traefik v2 if v1 is not already present. + +To migrate from an older Traefik v1 instance, please refer to the [Traefik documentation](https://doc.traefik.io/traefik/migration/v1-to-v2/) and [migration tool](https://github.com/traefik/traefik-migration-tool). + # Service Load Balancer Any service load balancer (LB) can be leveraged in your Kubernetes cluster. K3s provides a load balancer known as [Klipper Load Balancer](https://github.com/rancher/klipper-lb) that uses available host ports. diff --git a/content/k3s/latest/en/security/_index.md b/content/k3s/latest/en/security/_index.md index 7468e909504..f8b4285fc49 100644 --- a/content/k3s/latest/en/security/_index.md +++ b/content/k3s/latest/en/security/_index.md @@ -3,7 +3,9 @@ title: "Security" weight: 90 --- -This section describes the methodology and means of securing a K3s cluster. It's broken into 2 sections. +This section describes the methodology and means of securing a K3s cluster. It's broken into 2 sections. These guides assume k3s is running with embedded etcd. + +The documents below apply to both CIS 1.5 & 1.6. 
* [Hardening Guide](./hardening_guide/) * [CIS Benchmark Self-Assessment Guide](./self_assessment/) diff --git a/content/k3s/latest/en/security/hardening_guide/_index.md b/content/k3s/latest/en/security/hardening_guide/_index.md index aa140f48ee3..53964eb43dd 100644 --- a/content/k3s/latest/en/security/hardening_guide/_index.md +++ b/content/k3s/latest/en/security/hardening_guide/_index.md @@ -280,6 +280,28 @@ spec: name: kube-system ``` +With the applied restrictions, DNS will be blocked unless purposely allowed. Below is a network policy that will allow for traffic to exist for DNS. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-network-dns-policy + namespace: +spec: + ingress: + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + podSelector: + matchLabels: + k8s-app: kube-dns + policyTypes: + - Ingress +``` + > **Note:** Operators must manage network policies as normal for additional namespaces that are created. ## Known Issues diff --git a/content/rancher/v2.0-v2.4/_index.md b/content/rancher/v2.0-v2.4/_index.md index db06385ef7f..ff2500ddd4d 100644 --- a/content/rancher/v2.0-v2.4/_index.md +++ b/content/rancher/v2.0-v2.4/_index.md @@ -1,5 +1,5 @@ --- -title: v2.0-v2.4 +title: v2.0-v2.4.x weight: 2 showBreadcrumb: false --- diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md index 2bf2f7681fe..ff6631bef9a 100644 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md @@ -48,7 +48,7 @@ For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) level. 0 is off. 
[0-3] | | `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | | `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | | `certmanager.version` | "" | `string` - set cert-manager compatibility | diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md index 02a6fe1d39a..f991ddeef28 100644 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md +++ b/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md @@ -7,7 +7,7 @@ Interact with Rancher using command line interface (CLI) tools from your worksta ## Rancher CLI -Follow the steps in [rancher cli](../cli). +Follow the steps in [rancher cli](../../cli). Ensure you can run `rancher kubectl get pods` successfully. 
diff --git a/content/rancher/v2.5/_index.md b/content/rancher/v2.5/_index.md index 95cad4ab71b..50324f5568b 100644 --- a/content/rancher/v2.5/_index.md +++ b/content/rancher/v2.5/_index.md @@ -1,5 +1,5 @@ --- -title: v2.5 +title: v2.5.x weight: 1 showBreadcrumb: false --- diff --git a/content/rancher/v2.5/en/backups/migrating-rancher/_index.md b/content/rancher/v2.5/en/backups/migrating-rancher/_index.md index 1295c7c0940..49e3e58d52a 100644 --- a/content/rancher/v2.5/en/backups/migrating-rancher/_index.md +++ b/content/rancher/v2.5/en/backups/migrating-rancher/_index.md @@ -36,7 +36,7 @@ kind: Secret metadata: name: s3-creds type: Opaque -data: +stringData: accessKey: secretKey: ``` @@ -94,4 +94,4 @@ Use the same version of Helm to install Rancher, that was used on the first clus helm install rancher rancher-latest/rancher \ --namespace cattle-system \ --set hostname= \ -``` \ No newline at end of file +``` diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md index 610282c58f8..68996a69b8c 100644 --- a/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md @@ -9,55 +9,19 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members) -- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) -- [Editing Clusters in the Rancher UI](#editing-clusters-in-the-rancher-ui) -- [Editing Clusters with YAML](#editing-clusters-with-yaml) -- [Updating ingress-nginx](#updating-ingress-nginx) +### Cluster Configuration References + +The cluster configuration options depend on the type of Kubernetes cluster: + +- [RKE Cluster Configuration](./rke-config-reference) +- [EKS Cluster 
Configuration](./eks-config-reference) +- [GKE Cluster Configuration](./gke-config-reference) ### Cluster Management Capabilities by Cluster Type -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. +The options and settings available for an existing cluster change based on the method that you used to provision it. The following table summarizes the options and settings available for each cluster type: {{% include file="/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table" %}} -### Editing Clusters in the Rancher UI - -To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. - -In [clusters launched by RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. - -Note that these options are not available for registered clusters or hosted Kubernetes clusters. - -Option | Description | ----------|----------| - Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes). | - Network Provider | The \container networking interface (CNI) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | - Project Network Isolation | If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. Before Rancher v2.5.8, project network isolation is only available if you are using the Canal network plugin for RKE. In v2.5.8+, project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin.| - Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | - Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | - Pod Security Policy Support | Enables [pod security policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | - Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | - Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. 
| - Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | - -### Editing Clusters with YAML - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from File**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -### Updating ingress-nginx - -Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. - -If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/_index.md new file mode 100644 index 00000000000..9fa6ea0ce89 --- /dev/null +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/_index.md @@ -0,0 +1,421 @@ +--- +title: EKS Cluster Configuration Reference +shortTitle: EKS Cluster Configuration +weight: 2 +--- + +{{% tabs %}} +{{% tab "Rancher v2.5.6+" %}} + +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. + +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you've already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). 
+ +### Secrets Encryption + + + +Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) + +### API Server Endpoint Access + + + +Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Private-only API Endpoints + +If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. + +There are two ways to avoid this extra manual step: +- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. +- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). + +### Public Access Endpoints + + + +Optionally limit access to the public endpoint via explicit CIDR blocks. + +If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. 
+ +One of the following is required to enable private access: +- Rancher's IP must be part of an allowed CIDR block +- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group + +For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Subnet + + + +| Option | Description | +| ------- | ------------ | +| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | +| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Logging + + + +Configure control plane logs to send to Amazon CloudWatch. 
You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. + +Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. + +For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) + +### Managed Node Groups + + + +Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. + +For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) + +#### Bring your own launch template + +A launch template ID and version can be provided in order to easily configure the EC2 instances in a node group. If a launch template is provided, then none of the settings below will be configurable in Rancher. Therefore, using a launch template would require that all the necessary and desired settings from the list below would need to be specified in the launch template. Also note that if a launch template ID and version is provided, then only the template version can be updated. Using a new template ID would require creating a new managed node group. + +| Option | Description | Required/Optional | +| ------ | ----------- | ----------------- | +| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | Required | +| Image ID | Specify a custom AMI for the nodes. 
Custom AMIs used with EKS must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) | Optional | +| Node Volume Size | The launch template must specify an EBS volume with the desired size | Required | +| SSH Key | A key to be added to the instances to provide SSH access to the nodes | Optional | +| User Data | Cloud init script in [MIME multi-part format](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data) | Optional | +| Instance Resource Tags | Tag each EC2 instance in the node group | Optional | + +#### Rancher-managed launch templates + +If you do not specify a launch template, then you will be able to configure the above options in the Rancher UI and all of them can be updated after creation. In order to take advantage of all of these options, Rancher will create and manage a launch template for you. Each cluster in Rancher will have one Rancher-managed launch template and each managed node group that does not have a specified launch template will have one version of the managed launch template. The name of this launch template will have the prefix "rancher-managed-lt-" followed by the display name of the cluster. In addition, the Rancher-managed launch template will be tagged with the key "rancher-managed-template" and value "do-not-modify-or-delete" to help identify it as Rancher-managed. It is important that this launch template and its versions not be modified, deleted, or used with any other clusters or managed node groups. Doing so could result in your node groups being "degraded" and needing to be destroyed and recreated. 
+ +#### Custom AMIs + +If you specify a custom AMI, whether in a launch template or in Rancher, then the image must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) and you must provide user data to [bootstrap the node](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami). This is considered an advanced use case and understanding the requirements is imperative. + +If you specify a launch template that does not contain a custom AMI, then Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version and selected region. You can also select a [GPU enabled instance](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) for workloads that would benefit from it. + +>**Note** +>The GPU enabled instance setting in Rancher is ignored if a custom AMI is provided, either in the dropdown or in a launch template. + +#### Spot instances + +Spot instances are now [supported by EKS](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot). If a launch template is specified, Amazon recommends that the template not provide an instance type. Instead, Amazon recommends providing multiple instance types. If the "Request Spot Instances" checkbox is enabled for a node group, then you will have the opportunity to provide multiple instance types. + +>**Note** +>Any selection you made in the instance type dropdown will be ignored in this situation and you must specify at least one instance type to the "Spot Instance Types" section. Furthermore, a launch template used with EKS cannot request spot instances. Requesting spot instances must be part of the EKS configuration. + +#### Node Group Settings + +The following settings are also configurable. 
All of these except for the "Node Group Name" are editable after the node group is created. + +| Option | Description | +| ------- | ------------ | +| Node Group Name | The name of the node group. | +| Desired ASG Size | The desired number of instances. | +| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Labels | Kubernetes labels applied to the nodes in the managed node group. | +| Tags | These are tags for the managed node group and do not propagate to any of the associated resources. | + + +{{% /tab %}} +{{% tab "Rancher v2.5.0-v2.5.5" %}} + +### Changes in Rancher v2.5 + +More EKS options can be configured when you create an EKS cluster in Rancher, including the following: + +- Managed node groups +- Desired size, minimum size, maximum size (requires the Cluster Autoscaler to be installed) +- Control plane logging +- Secrets encryption with KMS + +The following capabilities have been added for configuring EKS clusters in Rancher: + +- GPU support +- Exclusively use managed nodegroups that come with the most up-to-date AMIs +- Add new nodes +- Upgrade nodes +- Add and remove node groups +- Disable and enable private access +- Add restrictions to public access +- Use your cloud credentials to create the EKS cluster instead of passing in your access key and secret key + +Due to the way that the cluster data is synced with EKS, if the cluster is modified from another source, such as in the EKS console, and in Rancher within five minutes, it could cause some changes to be overwritten. For information about how the sync works and how to configure it, refer to [this section](#syncing). 
+ +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. + +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). + +### Secrets Encryption + + + +Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) + +### API Server Endpoint Access + + + +Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Private-only API Endpoints + +If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. 
In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. + +There are two ways to avoid this extra manual step: +- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. +- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). + +### Public Access Endpoints + + + +Optionally limit access to the public endpoint via explicit CIDR blocks. + +If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. + +One of the following is required to enable private access: +- Rancher's IP must be part of an allowed CIDR block +- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group + +For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Subnet + + + +| Option | Description | +| ------- | ------------ | +| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. 
| +| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Logging + + + +Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. + +Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. + +For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) + +### Managed Node Groups + + + +Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. 
+ +For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) + +Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version. You can configure whether the AMI has GPU enabled. + +| Option | Description | +| ------- | ------------ | +| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | +| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | + +{{% /tab %}} +{{% tab "Rancher prior to v2.5" %}} + + +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. + +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Access Key | Enter the access key that you created for your IAM policy. | +| Secret Key | Enter the secret key that you created for your IAM policy. | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. 
+Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you've already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role).
+
+### Public IP for Worker Nodes
+
+
+
+Your selection for this option determines what options are available for **VPC & Subnet**.
+
+Option | Description
+-------|------------
+Yes | When your cluster nodes are provisioned, they're assigned both a private and a public IP address.
+No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. + +### VPC & Subnet + + + +The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) + +Option | Description + -------|------------ + Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. + Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + + +If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. + +{{% accordion id="yes" label="Click to expand" %}} + +If you're using **Custom: Choose from your existing VPC and Subnets**: + +(If you're using **Standard**, skip to the [instance options.)](#select-instance-options-2-4) + +1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. + +1. 
From the drop-down that displays, choose a VPC. + +1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. + +1. Click **Next: Select Security Group**. +{{% /accordion %}} + +If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. +{{% accordion id="no" label="Click to expand" %}} +Follow the steps below. + +>**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). + +1. From the drop-down that displays, choose a VPC. + +1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. + +{{% /accordion %}} + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Instance Options + + + +Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. + +Option | Description +-------|------------ +Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. 
+Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose.
+Desired ASG Size | The number of instances that your cluster will provision.
+User Data | Custom commands can be passed to perform automated configuration tasks. **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_
+
+{{% /tab %}}
+{{% /tabs %}}
+
+
+
+### Configuring the Refresh Interval
+
+{{% tabs %}}
+{{% tab "Rancher v2.5.8+" %}}
+
+The `eks-refresh-cron` setting is deprecated. It has been migrated to the `eks-refresh` setting, which is an integer representing seconds.
+
+The default value is 300 seconds.
+
+The syncing interval can be changed by running `kubectl edit setting eks-refresh`.
+
+If the `eks-refresh-cron` setting was previously set, the migration will happen automatically.
+
+The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs.
+
+{{% /tab %}}
+{{% tab "Before v2.5.8" %}}
+
+It is possible to change the refresh interval through the setting `eks-refresh-cron`. This setting accepts values in the Cron format. The default is `*/5 * * * *`.
+
+The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs.
+ +{{% /tab %}} +{{% /tabs %}} diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/config-reference/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/_index.md similarity index 97% rename from content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/config-reference/_index.md rename to content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/_index.md index dca199f8544..1738c2aef5f 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/config-reference/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/_index.md @@ -1,14 +1,15 @@ --- title: GKE Cluster Configuration Reference -weight: 1 +shortTitle: GKE Cluster Configuration +weight: 3 --- {{% tabs %}} {{% tab "v2.5.8" %}} -### Changes in v2.5.8 +# Changes in v2.5.8 -- We now support private GKE clusters. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/#private-clusters) +- We now support private GKE clusters. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.](./private-clusters) - [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc) are now supported. - We now support more configuration options for Rancher managed GKE clusters: - Project @@ -289,6 +290,17 @@ Access scopes are the legacy method of specifying permissions for your nodes. For more information, see the [section about enabling service accounts for a VM.](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + +### Configuring the Refresh Interval + +The refresh interval can be configured through the setting "gke-refresh", which is an integer representing seconds. + +The default value is 300 seconds. 
+ +The syncing interval can be changed by running `kubectl edit setting gke-refresh`. + +The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for GCP APIs. + {{% /tab %}} {{% tab "Rancher before v2.5.8" %}} diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/private-clusters/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md similarity index 96% rename from content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/private-clusters/_index.md rename to content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md index d938b790361..d66fdb087bd 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/private-clusters/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md @@ -1,8 +1,12 @@ - +--- title: Private Clusters weight: 2 +aliases: + - /rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/private-clusters --- +_Available as of v2.5.8_ + In GKE, [private clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept) are clusters whose nodes are isolated from inbound and outbound traffic by assigning them internal IP addresses only. Private clusters in GKE have the option of exposing the control plane endpoint as a publicly accessible address or as a private address. This is different from other Kubernetes providers, which may refer to clusters with private control plane endpoints as "private clusters" but still allow traffic to and from nodes. You may want to create a cluster with private nodes, with or without a public control plane endpoint, depending on your organization's networking and security requirements. 
A GKE cluster provisioned from Rancher can use isolated nodes by selecting "Private Cluster" in the Cluster Options (under "Show advanced options"). The control plane endpoint can optionally be made private by selecting "Enable Private Endpoint". ### Private Nodes diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/rke-config-reference/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/rke-config-reference/_index.md new file mode 100644 index 00000000000..6d89a799f1e --- /dev/null +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/rke-config-reference/_index.md @@ -0,0 +1,79 @@ +--- +title: RKE Cluster Configuration +weight: 1 +--- + +In [clusters launched by RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. + +- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) +- [Editing Clusters with YAML](#editing-clusters-with-yaml) +- [Updating ingress-nginx](#updating-ingress-nginx) + +# Configuration Options in the Rancher UI + +To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. + +Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE cluster configuration file in YAML. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) + +### Kubernetes Version + +The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes). + +### Network Provider + +The \container networking interface (CNI) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. + +### Project Network Isolation + +If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. + +Before Rancher v2.5.8, project network isolation is only available if you are using the Canal network plugin for RKE. + +In v2.5.8+, project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + +### Nginx Ingress + +If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. + +### Metrics Server Monitoring + +Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. + +### Pod Security Policy Support + +Enables [pod security policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. + +### Docker version on nodes + +Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. + +### Docker Root Directory + +The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. + +### Default Pod Security Policy + +If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. 
+ +### Cloud Provider + +If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. + +# Editing Clusters with YAML + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. +- To read from an existing RKE file, click **Read from File**. + +![image]({{}}/img/rancher/cluster-options-yaml.png) + +For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). + +For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) + +# Updating ingress-nginx + +Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. + +If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/syncing/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/_index.md similarity index 96% rename from content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/syncing/_index.md rename to content/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/_index.md index 03184492969..13a6b1e7f98 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/syncing/_index.md +++ b/content/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/_index.md @@ -1,4 +1,9 @@ -# Syncing +--- +title: Syncing +weight: 10 +aliases: + - /rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/syncing +--- Syncing is the feature for EKS and GKE clusters that causes Rancher to update the clusters' values so they are up to date with their corresponding cluster object in the hosted Kubernetes provider. This enables Rancher to not be the sole owner of a hosted cluster’s state. Its largest limitation is that processing an update from Rancher and another source at the same time or within 5 minutes of one finishing may cause the state from one source to completely overwrite the other. diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md index 437daf93b53..f47bb016f57 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md @@ -10,18 +10,15 @@ Amazon EKS provides a managed control plane for your Kubernetes cluster. 
Amazon - [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) - [Amazon VPC](#amazon-vpc) - [IAM Policies](#iam-policies) -- [Architecture](#architecture) - [Create the EKS Cluster](#create-the-eks-cluster) - [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) -- [Troubleshooting](#troubleshooting) +- [Architecture](#architecture) - [AWS Service Events](#aws-service-events) - [Security and Compliance](#security-and-compliance) - [Tutorial](#tutorial) - [Minimum EKS Permissions](#minimum-eks-permissions) - - [Service Role Permissions](#service-role-permissions) - - [VPC Permissions](#vpc-permissions) - [Syncing](#syncing) - +- [Troubleshooting](#troubleshooting) # Prerequisites in Amazon Web Services >**Note** @@ -47,13 +44,6 @@ Rancher needs access to your AWS account in order to provision and administer yo For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). -# Architecture - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) # Create the EKS Cluster @@ -82,406 +72,17 @@ You can access your cluster after its state is updated to **Active.** - `Default`, containing the `default` namespace - `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - # EKS Cluster Configuration Reference -### Changes in Rancher v2.5 +For the full list of EKS cluster configuration options, see [this page.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference) -More EKS options can be configured when you create an EKS cluster in Rancher, including the following: +# Architecture -- Managed node groups -- Desired size, minimum size, maximum size (requires the Cluster Autoscaler to be installed) -- Control plane logging -- Secrets encryption with KMS +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. -The following capabilities have been added for configuring EKS clusters in Rancher: +
Managing Kubernetes Clusters through Rancher's Authentication Proxy
-- GPU support -- Exclusively use managed nodegroups that come with the most up-to-date AMIs -- Add new nodes -- Upgrade nodes -- Add and remove node groups -- Disable and enable private access -- Add restrictions to public access -- Use your cloud credentials to create the EKS cluster instead of passing in your access key and secret key - -Due to the way that the cluster data is synced with EKS, if the cluster is modified from another source, such as in the EKS console, and in Rancher within five minutes, it could cause some changes to be overwritten. For information about how the sync works and how to configure it, refer to [this section](#syncing). - -{{% tabs %}} -{{% tab "Rancher v2.5.6+" %}} - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. 
For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Secrets Encryption - - - -Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) - -### API Server Endpoint Access - - - -Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Private-only API Endpoints - -If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. - -There are two ways to avoid this extra manual step: -- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. -- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). 
- -### Public Access Endpoints - - - -Optionally limit access to the public endpoint via explicit CIDR blocks. - -If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. - -One of the following is required to enable private access: -- Rancher's IP must be part of an allowed CIDR block -- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group - -For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Subnet - - - -| Option | Description | -| ------- | ------------ | -| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | -| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. 
- -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Logging - - - -Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. - -Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. - -For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - -### Managed Node Groups - - - -Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. - -For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - -#### Bring your own launch template - -A launch template ID and version can be provided in order to easily configure the EC2 instances in a node group. If a launch template is provided, then none of the settings below will be configurable in Rancher. 
Therefore, using a launch template would require that all the necessary and desired settings from the list below would need to be specified in the launch template. Also note that if a launch template ID and version is provided, then only the template version can be updated. Using a new template ID would require creating a new managed node group. - -| Option | Description | Required/Optional | -| ------ | ----------- | ----------------- | -| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | Required | -| Image ID | Specify a custom AMI for the nodes. Custom AMIs used with EKS must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) | Optional | -| Node Volume Size | The launch template must specify an EBS volume with the desired size | Required | -| SSH Key | A key to be added to the instances to provide SSH access to the nodes | Optional | -| User Data | Cloud init script in [MIME multi-part format](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data) | Optional | -| Instance Resource Tags | Tag each EC2 instance in the node group | Optional | - -#### Rancher-managed launch templates - -If you do not specify a launch template, then you will be able to configure the above options in the Rancher UI and all of them can be updated after creation. In order to take advantage of all of these options, Rancher will create and manage a launch template for you. Each cluster in Rancher will have one Rancher-managed launch template and each managed node group that does not have a specified launch template will have one version of the managed launch template. The name of this launch template will have the prefix "rancher-managed-lt-" followed by the display name of the cluster. 
In addition, the Rancher-managed launch template will be tagged with the key "rancher-managed-template" and value "do-not-modify-or-delete" to help identify it as Rancher-managed. It is important that this launch template and its versions not be modified, deleted, or used with any other clusters or managed node groups. Doing so could result in your node groups being "degraded" and needing to be destroyed and recreated. - -#### Custom AMIs - -If you specify a custom AMI, whether in a launch template or in Rancher, then the image must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) and you must provide user data to [bootstrap the node](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami). This is considered an advanced use case and understanding the requirements is imperative. - -If you specify a launch template that does not contain a custom AMI, then Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version and selected region. You can also select a [GPU enabled instance](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) for workloads that would benefit from it. - ->**Note** ->The GPU enabled instance setting in Rancher is ignored if a custom AMI is provided, either in the dropdown or in a launch template. - -#### Spot instances - -Spot instances are now [supported by EKS](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot). If a launch template is specified, Amazon recommends that the template not provide an instance type. Instead, Amazon recommends providing multiple instance types. If the "Request Spot Instances" checkbox is enabled for a node group, then you will have the opportunity to provide multiple instance types. 
- ->**Note** ->Any selection you made in the instance type dropdown will be ignored in this situation and you must specify at least one instance type to the "Spot Instance Types" section. Furthermore, a launch template used with EKS cannot request spot instances. Requesting spot instances must be part of the EKS configuration. - -#### Node Group Settings - -The following settings are also configurable. All of these except for the "Node Group Name" are editable after the node group is created. - -| Option | Description | -| ------- | ------------ | -| Node Group Name | The name of the node group. | -| Desired ASG Size | The desired number of instances. | -| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Labels | Kubernetes labels applied to the nodes in the managed node group. | -| Tags | These are tags for the managed node group and do not propagate to any of the associated resources. | - - -{{% /tab %}} -{{% tab "Rancher v2.5.0-v2.5.5" %}} - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. 
For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.x/en/user-settings/cloud-credentials/) | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Secrets Encryption - - - -Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) - -### API Server Endpoint Access - - - -Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Private-only API Endpoints - -If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. - -There are two ways to avoid this extra manual step: -- You can create the cluster with both private and public API endpoint access on cluster creation. 
You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. -- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). - -### Public Access Endpoints - - - -Optionally limit access to the public endpoint via explicit CIDR blocks. - -If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. - -One of the following is required to enable private access: -- Rancher's IP must be part of an allowed CIDR block -- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group - -For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Subnet - - - -| Option | Description | -| ------- | ------------ | -| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | -| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). 
| - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Logging - - - -Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. - -Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. - -For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - -### Managed Node Groups - - - -Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. - -For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - -Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version. 
You can configure whether the AMI has GPU enabled. - -| Option | Description | -| ------- | ------------ | -| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | -| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | - -{{% /tab %}} -{{% tab "Rancher prior to v2.5" %}} - - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Access Key | Enter the access key that you created for your IAM policy. | -| Secret Key | Enter the secret key that you created for your IAM policy. | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). 
- -### Public IP for Worker Nodes - - - -Your selection for this option determines what options are available for **VPC & Subnet**. - -Option | Description --------|------------ -Yes | When your cluster nodes are provisioned, they're assigned a both a private and public IP address. -No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. - -### VPC & Subnet - - - -The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) - -Option | Description - -------|------------ - Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. - Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - - -If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. - -{{% accordion id="yes" label="Click to expand" %}} - -If you're using **Custom: Choose from your existing VPC and Subnets**: - -(If you're using **Standard**, skip to the [instance options.)](#select-instance-options-2-4) - -1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. - -1. 
From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. - -1. Click **Next: Select Security Group**. -{{% /accordion %}} - -If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. -{{% accordion id="no" label="Click to expand" %}} -Follow the steps below. - ->**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. - -{{% /accordion %}} - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Instance Options - - - -Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. - -Option | Description --------|------------ -Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. 
-Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. -Desired ASG Size | The number of instances that your cluster will provision. -User Data | Custom commands can to be passed to perform automated configuration tasks **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ - -{{% /tab %}} -{{% /tabs %}} - - -# Troubleshooting - -If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) - -If an unauthorized error is returned while attempting to modify or register the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) - -For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). +![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) # AWS Service Events @@ -499,244 +100,18 @@ This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-ra # Minimum EKS Permissions -Documented here is a minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. Additional permissions are required for Rancher to provision the `Service Role` and `VPC` resources. 
Optionally these resources can be created **before** the cluster creation and will be selectable when defining the cluster configuration. - -Resource | Description ----------|------------ -Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#service-role-permissions). -VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions]({{}}/rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#vpc-permissions). - - -Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "EC2Permisssions", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances", - "ec2:RevokeSecurityGroupIngress", - "ec2:RevokeSecurityGroupEgress", - "ec2:DescribeVpcs", - "ec2:DescribeTags", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "ec2:DescribeRouteTables", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeKeyPairs", - "ec2:DescribeInternetGateways", - "ec2:DescribeImages", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeAccountAttributes", - "ec2:DeleteTags", - "ec2:DeleteSecurityGroup", - "ec2:DeleteKeyPair", - "ec2:CreateTags", - "ec2:CreateSecurityGroup", - "ec2:CreateLaunchTemplateVersion", - "ec2:CreateLaunchTemplate", - "ec2:CreateKeyPair", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:AuthorizeSecurityGroupEgress" - ], - "Resource": "*" - }, - { - "Sid": "CloudFormationPermisssions", - "Effect": "Allow", - "Action": [ - "cloudformation:ListStacks", - "cloudformation:ListStackResources", - "cloudformation:DescribeStacks", - "cloudformation:DescribeStackResources", - 
"cloudformation:DescribeStackResource", - "cloudformation:DeleteStack", - "cloudformation:CreateStackSet", - "cloudformation:CreateStack" - ], - "Resource": "*" - }, - { - "Sid": "IAMPermissions", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "iam:ListRoles", - "iam:ListRoleTags", - "iam:ListInstanceProfilesForRole", - "iam:ListInstanceProfiles", - "iam:ListAttachedRolePolicies", - "iam:GetRole", - "iam:GetInstanceProfile", - "iam:DetachRolePolicy", - "iam:DeleteRole", - "iam:CreateRole", - "iam:AttachRolePolicy" - ], - "Resource": "*" - }, - { - "Sid": "KMSPermisssions", - "Effect": "Allow", - "Action": "kms:ListKeys", - "Resource": "*" - }, - { - "Sid": "EKSPermisssions", - "Effect": "Allow", - "Action": [ - "eks:UpdateNodegroupVersion", - "eks:UpdateNodegroupConfig", - "eks:UpdateClusterVersion", - "eks:UpdateClusterConfig", - "eks:UntagResource", - "eks:TagResource", - "eks:ListUpdates", - "eks:ListTagsForResource", - "eks:ListNodegroups", - "eks:ListFargateProfiles", - "eks:ListClusters", - "eks:DescribeUpdate", - "eks:DescribeNodegroup", - "eks:DescribeFargateProfile", - "eks:DescribeCluster", - "eks:DeleteNodegroup", - "eks:DeleteFargateProfile", - "eks:DeleteCluster", - "eks:CreateNodegroup", - "eks:CreateFargateProfile", - "eks:CreateCluster" - ], - "Resource": "*" - } - ] -} -``` - -### Service Role Permissions - -Rancher will create a service role with the following trust policy: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "sts:AssumeRole", - "Principal": { - "Service": "eks.amazonaws.com" - }, - "Effect": "Allow", - "Sid": "" - } - ] -} -``` - -This role will also have two role policy attachments with the following policies ARNs: - -``` -arn:aws:iam::aws:policy/AmazonEKSClusterPolicy -arn:aws:iam::aws:policy/AmazonEKSServicePolicy -``` - -Permissions required for Rancher to create service role on users behalf during the EKS cluster creation process. 
- -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "IAMPermisssions", - "Effect": "Allow", - "Action": [ - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreateRole", - "iam:CreateServiceLinkedRole", - "iam:DeleteInstanceProfile", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfiles", - "iam:ListInstanceProfilesForRole", - "iam:ListRoles", - "iam:ListRoleTags", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile" - ], - "Resource": "*" - } - ] -} -``` - -### VPC Permissions - -Permissions required for Rancher to create VPC and associated resources. - -```json -{ - "Sid": "VPCPermissions", - "Effect": "Allow", - "Action": [ - "ec2:ReplaceRoute", - "ec2:ModifyVpcAttribute", - "ec2:ModifySubnetAttribute", - "ec2:DisassociateRouteTable", - "ec2:DetachInternetGateway", - "ec2:DescribeVpcs", - "ec2:DeleteVpc", - "ec2:DeleteTags", - "ec2:DeleteSubnet", - "ec2:DeleteRouteTable", - "ec2:DeleteRoute", - "ec2:DeleteInternetGateway", - "ec2:CreateVpc", - "ec2:CreateSubnet", - "ec2:CreateSecurityGroup", - "ec2:CreateRouteTable", - "ec2:CreateRoute", - "ec2:CreateInternetGateway", - "ec2:AttachInternetGateway", - "ec2:AssociateRouteTable" - ], - "Resource": "*" -} -``` +See [this page](./permissions) for the minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. # Syncing -The EKS provisioner can synchronize the state of an EKS cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../syncing) +The EKS provisioner can synchronize the state of an EKS cluster between Rancher and the provider. 
For an in-depth technical explanation of how this works, see [Syncing.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/) -### Configuring the Refresh Interval +For information on configuring the refresh interval, refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/#configuring-the-refresh-interval) -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} +# Troubleshooting -The `eks-refresh-cron` setting is deprecated. It has been migrated to the `eks-refresh` setting, which is an integer representing seconds. +If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) -The default value is 300 seconds. +If an unauthorized error is returned while attempting to modify or register the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) -The syncing interval can be changed by running `kubectl edit setting eks-refresh`. - -If the `eks-refresh-cron` setting was previously set, the migration will happen automatically. - -The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. - -{{% /tab %}} -{{% tab "Before v2.5.8" %}} - -It is possible to change the refresh interval through the setting `eks-refresh-cron`. This setting accepts values in the Cron format. The default is `*/5 * * * *`. - -The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. 
- -{{% /tab %}} -{{% /tabs %}} +For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md new file mode 100644 index 00000000000..0567e110d02 --- /dev/null +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md @@ -0,0 +1,217 @@ +--- +title: Minimum EKS Permissions +weight: 1 +--- + +Documented here is a minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. Additional permissions are required for Rancher to provision the `Service Role` and `VPC` resources. Optionally these resources can be created **before** the cluster creation and will be selectable when defining the cluster configuration. + +Resource | Description +---------|------------ +Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions](#service-role-permissions). +VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions](#vpc-permissions). + + +Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EC2Permisssions", + "Effect": "Allow", + "Action": [ + "ec2:RunInstances", + "ec2:RevokeSecurityGroupIngress", + "ec2:RevokeSecurityGroupEgress", + "ec2:DescribeVpcs", + "ec2:DescribeTags", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeRouteTables", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeKeyPairs", + "ec2:DescribeInternetGateways", + "ec2:DescribeImages", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeAccountAttributes", + "ec2:DeleteTags", + "ec2:DeleteSecurityGroup", + "ec2:DeleteKeyPair", + "ec2:CreateTags", + "ec2:CreateSecurityGroup", + "ec2:CreateLaunchTemplateVersion", + "ec2:CreateLaunchTemplate", + "ec2:CreateKeyPair", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress" + ], + "Resource": "*" + }, + { + "Sid": "CloudFormationPermisssions", + "Effect": "Allow", + "Action": [ + "cloudformation:ListStacks", + "cloudformation:ListStackResources", + "cloudformation:DescribeStacks", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStackResource", + "cloudformation:DeleteStack", + "cloudformation:CreateStackSet", + "cloudformation:CreateStack" + ], + "Resource": "*" + }, + { + "Sid": "IAMPermissions", + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:ListInstanceProfilesForRole", + "iam:ListInstanceProfiles", + "iam:ListAttachedRolePolicies", + "iam:GetRole", + "iam:GetInstanceProfile", + "iam:DetachRolePolicy", + "iam:DeleteRole", + "iam:CreateRole", + "iam:AttachRolePolicy" + ], + "Resource": "*" + }, + { + "Sid": "KMSPermisssions", + "Effect": "Allow", + "Action": "kms:ListKeys", + "Resource": "*" + }, + { + "Sid": "EKSPermisssions", + "Effect": "Allow", + "Action": [ + "eks:UpdateNodegroupVersion", + "eks:UpdateNodegroupConfig", + "eks:UpdateClusterVersion", + "eks:UpdateClusterConfig", + "eks:UntagResource", + 
"eks:TagResource", + "eks:ListUpdates", + "eks:ListTagsForResource", + "eks:ListNodegroups", + "eks:ListFargateProfiles", + "eks:ListClusters", + "eks:DescribeUpdate", + "eks:DescribeNodegroup", + "eks:DescribeFargateProfile", + "eks:DescribeCluster", + "eks:DeleteNodegroup", + "eks:DeleteFargateProfile", + "eks:DeleteCluster", + "eks:CreateNodegroup", + "eks:CreateFargateProfile", + "eks:CreateCluster" + ], + "Resource": "*" + } + ] +} +``` + +### Service Role Permissions + +Rancher will create a service role with the following trust policy: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Effect": "Allow", + "Sid": "" + } + ] +} +``` + +This role will also have two role policy attachments with the following policies ARNs: + +``` +arn:aws:iam::aws:policy/AmazonEKSClusterPolicy +arn:aws:iam::aws:policy/AmazonEKSServicePolicy +``` + +Permissions required for Rancher to create service role on users behalf during the EKS cluster creation process. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "IAMPermisssions", + "Effect": "Allow", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:CreateInstanceProfile", + "iam:CreateRole", + "iam:CreateServiceLinkedRole", + "iam:DeleteInstanceProfile", + "iam:DeleteRole", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile" + ], + "Resource": "*" + } + ] +} +``` + +### VPC Permissions + +Permissions required for Rancher to create VPC and associated resources. 
+ +```json +{ + "Sid": "VPCPermissions", + "Effect": "Allow", + "Action": [ + "ec2:ReplaceRoute", + "ec2:ModifyVpcAttribute", + "ec2:ModifySubnetAttribute", + "ec2:DisassociateRouteTable", + "ec2:DetachInternetGateway", + "ec2:DescribeVpcs", + "ec2:DeleteVpc", + "ec2:DeleteTags", + "ec2:DeleteSubnet", + "ec2:DeleteRouteTable", + "ec2:DeleteRoute", + "ec2:DeleteInternetGateway", + "ec2:CreateVpc", + "ec2:CreateSubnet", + "ec2:CreateSecurityGroup", + "ec2:CreateRouteTable", + "ec2:CreateRoute", + "ec2:CreateInternetGateway", + "ec2:AttachInternetGateway", + "ec2:AssociateRouteTable" + ], + "Resource": "*" +} +``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md index a2541bd4a10..d58fee74333 100644 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md +++ b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md @@ -4,6 +4,7 @@ shortTitle: Google Kubernetes Engine weight: 2105 aliases: - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-gke/ + - /rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke --- {{% tabs %}} @@ -12,7 +13,7 @@ aliases: - [Prerequisites](#prerequisites) - [Provisioning a GKE Cluster](#provisioning-a-gke-cluster) - [Private Clusters](#private-clusters) -- [Configuration Reference](./config-reference) +- [Configuration Reference](#configuration-reference) - [Updating Kubernetes Version](#updating-kubernetes-version) - [Syncing](#syncing) @@ -84,11 +85,11 @@ You can access your cluster after its state is updated to **Active.** # Private Clusters -Private GKE clusters are supported. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.](./private-clusters) +Private GKE clusters are supported. 
Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/) # Configuration Reference -More configuration options are available for v2.5.8. For details on configuring GKE clusters in Rancher, see [this page.](./config-reference) +For details on configuring GKE clusters in Rancher, see [this page.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference) # Updating Kubernetes Version The Kubernetes version of a cluster can be upgraded to any version available in the region or zone fo the GKE cluster. Upgrading the master Kubernetes version does not automatically upgrade worker nodes. Nodes can be upgraded independently. @@ -98,17 +99,10 @@ The Kubernetes version of a cluster can be upgraded to any version available in # Syncing -The GKE provisioner can synchronize the state of an GKE cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../syncing) +The GKE provisioner can synchronize the state of a GKE cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/syncing) -### Configuring the Refresh Interval +For information on configuring the refresh interval, see [this section.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/#configuring-the-refresh-interval) -The refresh interval can be configured through the setting "gke-refresh", which is an integer representing seconds. - -The default value is 300 seconds. - -The syncing interval can be changed by running `kubectl edit setting gke-refresh`. - -The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for GCP APIs. 
{{% /tab %}} {{% tab "Rancher before v2.5.8" %}} diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md new file mode 100644 index 00000000000..ce7d7b67ae2 --- /dev/null +++ b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md @@ -0,0 +1,47 @@ +--- +title: Windows and Linux Cluster Feature Parity +weight: 3 +--- + +Windows clusters do not share the same feature support as Linux clusters. + +The following chart describes the feature parity between Windows and Linux on Rancher as of Rancher v2.5.8: + +**Component** | **Linux** | **Windows** +--- | --- | --- +**Distributions** | | +RKE | Supported | Supported +RKE2 | Supported | Tenatively Planned For 2.6.x +K3S | Supported | Not Supported +EKS | Supported | Not Supported +GKE | Supported | Not Supported +AKS | Supported | Not Supported +**Rancher Components** | | +Server | Supported | Not Supported +Agent | Supported | Supported +Fleet | Supported | Supported +EKS Operator | Supported | Not Supported +AKS Operator | Not Supported | Not Supported +GKE Operator | Not Supported | Not Supported +Alerting v1 | Supported | Supported +Monitoring v1 | Supported | Supported +Logging v1 | Supported | Supported +Monitoring/Alerting v2 | Supported | Supported In 2.5.8+ +Logging v2 | Supported | Supported In 2.5.8+ +Istio | Supported | Not Supported +Catalog v1 | Supported | Not Supported +Catalog v2 | Supported | Not Supported +OPA | Supported | Not Supported +Longhorn | Supported | Not Supported +CIS Scans | Supported | Not Supported +Backup/Restore Operator | Supported | Not Supported +**CNI / Add-ons** | | +Flannel | Supported | Supported +Canal | Supported | Not Supported +Calico | Supported | Tentatively Planned for 2.6.x +Cilium | Supported | Not Supported +Multus | Supported | Not Supported +Traefik | Supported | Not 
Supported +NGINX Ingress | Supported | Not Supported + +For updated information on feature support, you may visit [rancher/windows](https://github.com/rancher/windows) on GitHub. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md index c9b49d57ac3..68bdf90f03e 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md @@ -48,7 +48,7 @@ For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.5/en/installation/api-auditing) level. 0 is off. [0-3] | | `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | | `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs | | `certmanager.version` | "" | `string` - set cert-manager compatibility | diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md index ce0ae4f4838..2d591e83ab4 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md +++ b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md @@ 
-40,8 +40,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ {{% tab "Rancher before v2.5.8" %}} ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ +helm template rancher ./rancher-.tgz --output-dir . \ --namespace cattle-system \ --set hostname= \ --set certmanager.version= \ @@ -63,8 +62,7 @@ helm template ./rancher-.tgz --output-dir . \ ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ +helm template rancher ./rancher-.tgz --output-dir . \ --no-hooks \ # prevent files for Helm hooks from being generated --namespace cattle-system \ --set hostname= \ @@ -77,8 +75,7 @@ helm template ./rancher-.tgz --output-dir . \ If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ +helm template rancher ./rancher-.tgz --output-dir . \ --no-hooks \ # prevent files for Helm hooks from being generated --namespace cattle-system \ --set hostname= \ @@ -94,8 +91,7 @@ helm template ./rancher-.tgz --output-dir . \ ```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ +helm template rancher ./rancher-.tgz --output-dir . \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ @@ -107,8 +103,7 @@ helm template ./rancher-.tgz --output-dir . \ If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: ```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ +helm template rancher ./rancher-.tgz --output-dir . 
\ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-linux/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md similarity index 98% rename from content/rancher/v2.5/en/installation/install-rancher-on-linux/_index.md rename to content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md index 0ea25e9deb6..fc61e42affe 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-linux/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md @@ -1,6 +1,8 @@ --- -title: Install/Upgrade Rancher on a Linux OS +title: Install/Upgrade Rancher with RancherD weight: 3 +aliases: + - /rancher/v2.5/en/installation/install-rancher-on-linux --- _Available as of Rancher v2.5.4_ diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md similarity index 99% rename from content/rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration/_index.md rename to content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md index d1eec617cbf..2e5965b9453 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md @@ -1,6 +1,8 @@ --- title: RancherD Configuration Reference weight: 1 +aliases: + - /rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration --- > RancherD is an experimental feature. 
diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-linux/rollbacks/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md similarity index 75% rename from content/rancher/v2.5/en/installation/install-rancher-on-linux/rollbacks/_index.md rename to content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md index 20870c06ec7..68a8b6dab82 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-linux/rollbacks/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md @@ -1,6 +1,8 @@ --- title: Rollbacks weight: 3 +aliases: + - /rancher/v2.5/en/installation/install-rancher-on-linux/rollbacks --- > RancherD is an experimental feature. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-linux/upgrades/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md similarity index 97% rename from content/rancher/v2.5/en/installation/install-rancher-on-linux/upgrades/_index.md rename to content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md index 4700c807d82..a57c16f1a6e 100644 --- a/content/rancher/v2.5/en/installation/install-rancher-on-linux/upgrades/_index.md +++ b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md @@ -1,6 +1,8 @@ --- title: Upgrades weight: 2 +aliases: + - /rancher/v2.5/en/installation/install-rancher-on-linux/upgrades --- > RancherD is an experimental feature. 
diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md index 72509798b96..62f14aa8c15 100644 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md +++ b/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md @@ -111,8 +111,7 @@ helm template rancher ./rancher-.tgz --output-dir . \ The Helm 2 command is as follows: ``` -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ +helm template rancher ./rancher-.tgz --output-dir . \ --namespace cattle-system \ --set hostname= \ --set rancherImage=/rancher/rancher \ diff --git a/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md b/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md index 93e428a3a54..139ca70ac76 100644 --- a/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md +++ b/content/rancher/v2.5/en/installation/resources/update-ca-cert/_index.md @@ -110,10 +110,10 @@ Method 3 can be used as a fallback if method 1 and 2 are unfeasible. ### Method 1: Kubectl command -For each cluster under Rancher management (including `local`) run the following command using the Kubeconfig file of the Rancher management cluster (RKE or K3S). +For each cluster under Rancher management (except the `local` Rancher management cluster) run the following command using the Kubeconfig file of the Rancher management cluster (RKE or K3S). ``` -kubectl patch clusters -p '{"status":{"agentImage":"dummy"}}' --type merge +kubectl patch clusters.management.cattle.io -p '{"status":{"agentImage":"dummy"}}' --type merge ``` This command will cause all Agent Kubernetes resources to be reconfigured with the checksum of the new certificate. 
@@ -142,4 +142,4 @@ With this method you are recreating the Rancher agents by running a set of comma First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 Then, connect to a controlplane node of the downstream cluster via SSH, create a Kubeconfig and apply the definitions: -https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b \ No newline at end of file +https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b diff --git a/content/rancher/v2.5/en/quick-start-guide/cli/_index.md b/content/rancher/v2.5/en/quick-start-guide/cli/_index.md index 6a4a6aaa9a0..319967f3478 100644 --- a/content/rancher/v2.5/en/quick-start-guide/cli/_index.md +++ b/content/rancher/v2.5/en/quick-start-guide/cli/_index.md @@ -7,7 +7,7 @@ Interact with Rancher using command line interface (CLI) tools from your worksta ## Rancher CLI -Follow the steps in [rancher cli](../cli). +Follow the steps in [rancher cli](../../cli). Ensure you can run `rancher kubectl get pods` successfully. diff --git a/content/rancher/v2.5/en/security/cve/_index.md b/content/rancher/v2.5/en/security/cve/_index.md index 3c845125277..d1d9e258167 100644 --- a/content/rancher/v2.5/en/security/cve/_index.md +++ b/content/rancher/v2.5/en/security/cve/_index.md @@ -7,12 +7,12 @@ Rancher is committed to informing the community of security issues in our produc | ID | Description | Date | Resolution | |----|-------------|------|------------| -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. 
| 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.5/en/upgrades/rollbacks/). | -| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | -| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | -| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. 
| 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | -| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2021-25313](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25313) | A security vulnerability was discovered on all Rancher 2 versions. When accessing the Rancher API with a browser, the URL was not properly escaped, making it vulnerable to an XSS attack. 
Specially crafted URLs to these API endpoints could include JavaScript which would be embedded in the page and execute in a browser. There is no direct mitigation. Avoid clicking on untrusted links to your Rancher server. | 2 Mar 2021 | [Rancher v2.5.6](https://github.com/rancher/rancher/releases/tag/v2.5.6), [Rancher v2.4.14](https://github.com/rancher/rancher/releases/tag/v2.4.14), and [Rancher v2.3.11](https://github.com/rancher/rancher/releases/tag/v2.3.11) | | [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2021-25313](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25313) | A security vulnerability was discovered on all Rancher 2 versions. When accessing the Rancher API with a browser, the URL was not properly escaped, making it vulnerable to an XSS attack. Specially crafted URLs to these API endpoints could include JavaScript which would be embedded in the page and execute in a browser. There is no direct mitigation. Avoid clicking on untrusted links to your Rancher server. 
| 2 Mar 2021 | [Rancher v2.5.6](https://github.com/rancher/rancher/releases/tag/v2.5.6), [Rancher v2.4.14](https://github.com/rancher/rancher/releases/tag/v2.4.14), and [Rancher v2.3.11](https://github.com/rancher/rancher/releases/tag/v2.3.11) | \ No newline at end of file +| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | +| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. 
| 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | +| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | +| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. 
| 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.5/en/upgrades/rollbacks/). | \ No newline at end of file diff --git a/content/rancher/v2.5/en/security/selinux/_index.md b/content/rancher/v2.5/en/security/selinux/_index.md index fe707e064e2..1dd90df7522 100644 --- a/content/rancher/v2.5/en/security/selinux/_index.md +++ b/content/rancher/v2.5/en/security/selinux/_index.md @@ -9,7 +9,12 @@ _Available as of v2.5.8_ Developed by Red Hat, it is an implementation of mandatory access controls (MAC) on Linux. Mandatory access controls allow an administrator of a system to define how applications and users can access different resources such as files, devices, networks and inter-process communication. SELinux also enhances security by making an OS restrictive by default. -After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. +After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. To check whether SELinux is enabled and enforcing on your system, use `getenforce`: + +``` +# getenforce +Enforcing +``` We provide two RPMs (Red Hat packages) that enable Rancher products to function properly on SELinux-enforcing hosts: `rancher-selinux` and `rke2-selinux`. diff --git a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md index 248ad1a58d9..f9a27b3b47d 100644 --- a/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md +++ b/content/rancher/v2.x/en/admin-settings/pod-security-policies/_index.md @@ -73,7 +73,7 @@ We recommend adding PSPs during cluster and project creation instead of adding i 2. Name the policy. -3. 
Complete each section of the form. Refer to the [Kubernetes documentation]((https://kubernetes.io/docs/concepts/policy/pod-security-policy/)) for more information on what each policy does. +3. Complete each section of the form. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each policy does. # Configuration diff --git a/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md b/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md index 2b8ff6a6c7e..a697b1a3906 100644 --- a/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md +++ b/content/rancher/v2.x/en/deploy-across-clusters/fleet/_index.md @@ -6,7 +6,7 @@ weight: 1 _Available as of Rancher v2.5_ -Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that is works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. +Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. 
diff --git a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md index 8186ad6918d..1b523500d5f 100644 --- a/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ b/content/rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/_index.md @@ -48,7 +48,7 @@ For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.x/en/installation/api-auditing) level. 0 is off. [0-3] | | `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | | `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | | `certmanager.version` | "" | `string` - set cert-manager compatibility | diff --git a/content/rke/latest/en/cert-mgmt/_index.md b/content/rke/latest/en/cert-mgmt/_index.md index 21f9f53011e..95ba2123720 100644 --- a/content/rke/latest/en/cert-mgmt/_index.md +++ b/content/rke/latest/en/cert-mgmt/_index.md @@ -23,7 +23,8 @@ By default, Kubernetes clusters require certificates and RKE will automatically After the certificates are rotated, the Kubernetes components are automatically restarted. 
Certificates can be rotated for the following services: - etcd -- kubelet +- kubelet (node certificate) +- kubelet (serving certificate, if [enabled]({{}}/rke/latest/en/config-options/services/#kubelet-options)) - kube-apiserver - kube-proxy - kube-scheduler diff --git a/content/rke/latest/en/example-yamls/_index.md b/content/rke/latest/en/example-yamls/_index.md index 64d3ecf54bb..989623a7f01 100644 --- a/content/rke/latest/en/example-yamls/_index.md +++ b/content/rke/latest/en/example-yamls/_index.md @@ -185,6 +185,9 @@ services: cluster_dns_server: 10.43.0.10 # Fail if swap is on fail_swap_on: false + # Generate a certificate signed by the kube-ca Certificate Authority + # for the kubelet to use as a server certificate + generate_serving_certificate: true # Set max pods to 250 instead of default 110 extra_args: max-pods: 250