From 331b43a83c46dfdbfd8abcff5a4371ec5ea0615e Mon Sep 17 00:00:00 2001 From: Billy Tat Date: Thu, 25 Aug 2022 23:08:19 -0700 Subject: [PATCH] Apply Divio and update links --- .../admin-settings/drivers/drivers.md | 46 ----------- .../version-2.0-2.4/backups/backup/backup.md | 10 +-- .../backups/restore/restore.md | 8 +- .../cluster-admin/tools/istio/setup/setup.md | 26 ------- .../cluster-capabilities-table/index.md | 34 ++++----- .../vsphere-node-template-config.md | 16 ---- ...ntributing.md => contribute-to-rancher.md} | 2 +- .../version-2.0-2.4/explanations.md | 1 + .../skipped-and-not-applicable-tests.md} | 0 .../cluster-alerts}/default-alerts.md | 6 +- .../cluster-logging}/elasticsearch.md | 0 .../cluster-logging}/fluentd.md | 0 .../cluster-logging}/kafka.md | 0 .../cluster-logging}/splunk.md | 4 +- .../cluster-logging}/syslog.md | 0 .../cluster-monitoring}/cluster-metrics.md | 16 ++-- .../cluster-monitoring}/custom-metrics.md | 2 +- .../cluster-monitoring}/expression.md | 4 +- .../cluster-monitoring}/project-monitoring.md | 14 ++-- .../cluster-monitoring}/prometheus.md | 6 +- .../cluster-monitoring}/viewing-metrics.md | 8 +- .../istio/cpu-and-memory-allocations.md} | 0 .../istio/disable-istio.md} | 0 .../istio/rbac-for-istio.md} | 0 .../istio}/release-notes.md | 0 .../integrations-in-rancher}/notifiers.md | 4 +- .../opa-gatekeeper.md | 2 +- .../version-2.0-2.4/{faq => }/faq.md | 6 +- ... 
container-network-interface-providers.md} | 30 ++++---- ...tl.md => install-and-configure-kubectl.md} | 0 .../version-2.0-2.4/faq/networking.md | 9 +++ .../faq/networking/networking.md | 9 --- ...cher.md => rancher-is-no-longer-needed.md} | 4 +- .../faq/{security => }/security.md | 4 +- .../technical.md => technical-items.md} | 14 ++-- .../faq/{telemetry => }/telemetry.md | 0 .../{upgrades-to-2x => }/upgrades-to-2x.md | 0 .../version-2.0-2.4/getting-started.md | 1 + .../air-gap-helm2}/install-rancher.md | 36 ++++----- .../air-gap-helm2}/launch-kubernetes.md | 12 +-- .../populate-private-registry.md | 12 +-- .../air-gap-helm2}/prepare-nodes.md | 20 ++--- .../node-certificate-recognizedca.md} | 2 +- .../node-certificate.md} | 2 +- .../node-externalssl-certificate.md} | 2 +- .../node-externalssl-recognizedca.md} | 2 +- .../configure-layer-7-nginx-load-balancer.md} | 16 ++-- .../enable-api-audit-log.md} | 6 +- .../helm2/create-nodes-lb}/nginx.md | 2 +- .../helm2/create-nodes-lb}/nlb.md | 16 ++-- .../helm2/helm-init}/troubleshooting.md | 2 +- .../helm2/helm-rancher}/chart-options.md | 16 ++-- .../helm2/helm-rancher}/tls-secrets.md | 0 .../helm2/helm-rancher}/troubleshooting.md | 0 .../helm2/kubernetes-rke}/troubleshooting.md | 0 .../helm2/rke-add-on}/api-auditing.md | 6 +- .../helm2/rke-add-on/layer-4-lb}/nlb.md | 20 ++--- .../helm2/rke-add-on/layer-7-lb}/alb.md | 4 +- .../helm2/rke-add-on/layer-7-lb}/nginx.md | 6 +- .../helm2/rke-add-on}/proxy.md | 8 +- .../troubleshooting}/404-default-backend.md | 4 +- .../generic-troubleshooting.md | 8 +- .../troubleshooting}/job-complete-status.md | 4 +- .../open-ports-with-firewalld.md} | 2 +- .../rke-add-on}/layer-4-lb.md | 24 +++--- .../rke-add-on}/layer-7-lb.md | 26 +++---- .../tune-etcd-for-large-installs.md} | 0 .../istio-traffic-management-features.md} | 4 +- .../rancher-on-arm64.md} | 6 +- .../unsupported-storage-drivers.md} | 2 +- .../rollbacks.md | 6 +- .../troubleshooting.md | 0 .../upgrades}/helm2.md | 26 +++---- 
.../upgrades}/migrating-from-rke-add-on.md | 8 +- .../upgrades}/namespace-migration.md | 10 +-- .../install-docker.md} | 0 .../port-requirements.md} | 18 ++--- .../infrastructure-private-registry.md} | 36 ++++----- .../install-kubernetes.md} | 22 +++--- .../install-rancher-ha.md} | 38 +++++----- .../publish-images.md} | 12 +-- .../install-kubernetes.md} | 12 +-- .../install-rancher.md | 8 +- .../set-up-infrastructure.md} | 12 +-- .../certificate-troubleshooting.md} | 0 .../roll-back-docker-installed-rancher.md} | 8 +- .../upgrade-docker-installed-rancher.md} | 40 +++++----- .../resources/add-tls-secrets.md} | 2 +- .../resources/choose-a-rancher-version.md} | 10 +-- .../resources/custom-ca-root-certificates.md} | 4 +- .../resources/helm-version-requirements.md} | 2 +- .../resources}/local-system-charts.md | 6 +- .../resources/update-rancher-certificate.md} | 4 +- .../resources/upgrade-cert-manager-helm-2.md} | 4 +- .../resources/upgrade-cert-manager.md} | 6 +- .../upgrade-and-roll-back-kubernetes.md} | 24 +++--- ...e-kubernetes-without-upgrading-rancher.md} | 6 +- .../introduction}/overview.md | 26 +++---- .../introduction/what-are-divio-docs.md | 1 + .../quick-start-guides}/cli.md | 12 +-- .../deploy-rancher-manager/aws.md} | 4 +- .../deploy-rancher-manager/azure.md} | 4 +- .../deploy-rancher-manager/digitalocean.md} | 4 +- .../deploy-rancher-manager/gcp.md} | 4 +- .../deploy-rancher-manager/helm-cli.md} | 8 +- .../deploy-rancher-manager/vagrant.md} | 4 +- .../deploy-workloads/nodeports.md} | 12 +-- .../deploy-workloads/workload-ingress.md} | 6 +- .../version-2.0-2.4/how-to-guides.md | 1 + .../configure-active-directory.md} | 10 +-- .../configure-azure-ad.md} | 20 ++--- .../configure-freeipa.md} | 2 +- .../configure-github.md} | 2 +- .../configure-google-oauth.md} | 18 ++--- .../configure-keycloak.md} | 8 +- .../configure-okta-saml.md} | 0 .../configure-pingidentity.md} | 0 .../create-local-users.md} | 0 .../manage-users-and-groups.md} | 4 +- 
.../configure-ms-adfs-for-rancher.md} | 30 ++++---- .../configure-rancher-for-ms-adfs.md} | 2 +- .../about-group-permissions.md} | 2 +- .../manage-cluster-drivers.md} | 6 +- .../manage-node-drivers.md} | 4 +- .../access-or-share-templates.md} | 2 +- .../about-rke1-templates/apply-templates.md} | 10 +-- .../creator-permissions.md | 4 +- .../enforce-templates.md} | 6 +- .../example-use-cases.md} | 16 ++-- .../about-rke1-templates/infrastructure.md} | 4 +- .../manage-rke1-templates.md} | 14 ++-- .../override-template-settings.md} | 2 +- .../create-pod-security-policies.md} | 4 +- .../global-default-private-registry.md} | 8 +- .../cluster-and-project-roles.md} | 6 +- .../custom-roles.md} | 10 +-- .../global-permissions.md | 6 +- .../locked-roles.md | 2 +- ...-alerts-for-periodic-scan-on-a-schedule.md | 1 + ...reate-a-custom-benchmark-version-to-run.md | 1 + ...able-alerting-for-rancher-cis-benchmark.md | 1 + .../install-rancher-cis-benchmark.md | 1 + .../run-a-scan-periodically-on-a-schedule.md | 1 + .../cis-scan-guides/run-a-scan.md | 1 + .../cis-scan-guides/skip-tests.md | 1 + .../uninstall-rancher-cis-benchmark.md | 1 + .../cis-scan-guides/view-reports.md | 1 + .../enable-istio-in-cluster-with-psp.md} | 2 +- .../enable-istio-in-cluster.md | 12 +-- .../enable-istio-in-namespace.md | 2 +- .../generate-and-view-traffic.md} | 0 .../istio-setup-guide}/node-selectors.md | 4 +- .../set-up-istio-gateway.md} | 6 +- .../set-up-traffic-management.md | 2 +- .../istio-setup-guide/use-istio-sidecar.md} | 2 +- .../access-clusters/add-users-to-clusters.md} | 14 ++-- .../authorized-cluster-endpoint.md} | 12 +-- .../use-kubectl-and-kubeconfig.md} | 6 +- .../add-a-pod-security-policy.md} | 8 +- .../assign-pod-security-policies.md} | 2 +- .../manage-clusters}/backing-up-etcd.md | 8 +- .../manage-clusters/clean-cluster-nodes.md} | 8 +- .../clone-cluster-configuration.md} | 12 +-- .../about-glusterfs-volumes.md} | 2 +- .../about-persistent-storage.md} | 2 +- 
.../dynamically-provision-new-storage.md} | 10 +-- .../install-iscsi-volumes.md} | 2 +- .../set-up-existing-storage.md} | 6 +- .../nfs-storage.md} | 2 +- .../persistent-storage-in-amazon-ebs.md} | 2 +- .../vsphere-storage.md} | 14 ++-- .../use-aws-ec2-auto-scaling-groups.md} | 8 +- .../manage-clusters/nodes-and-node-pools.md} | 32 ++++---- .../projects-and-namespaces.md | 40 +++++----- .../manage-clusters}/restoring-etcd.md | 16 ++-- .../manage-clusters/rotate-certificates.md} | 0 .../manage-projects/add-users-to-projects.md} | 10 +-- .../manage-projects/ci-cd-pipelines.md} | 2 +- .../manage-projects/manage-namespaces.md} | 26 +++---- .../manage-pod-security-policies.md} | 6 +- .../about-project-resource-quotas.md} | 4 +- .../override-default-limit-in-namespaces.md} | 10 +-- .../resource-quota-types.md} | 0 .../set-container-default-resource-limits.md} | 2 +- .../back-up-docker-installed-rancher.md} | 4 +- .../back-up-k3s-installed-rancher.md} | 2 +- ...p-rancher-launched-kubernetes-clusters.md} | 2 +- .../restore-docker-installed-rancher.md} | 8 +- .../restore-k3s-installed-rancher.md} | 0 ...unched-kubernetes-clusters-from-backup.md} | 12 +-- .../roll-back-to-v2.0-v2.1.md} | 2 +- .../deploy-apps-across-clusters.md} | 14 ++-- .../adding-catalogs.md | 38 +++++----- .../helm-charts-in-rancher}/built-in.md | 6 +- .../helm-charts-in-rancher}/catalog-config.md | 8 +- .../helm-charts-in-rancher}/creating-apps.md | 6 +- .../helm-charts-in-rancher}/globaldns.md | 14 ++-- .../helm-charts-in-rancher}/launching-apps.md | 12 +-- .../helm-charts-in-rancher}/managing-apps.md | 0 .../multi-cluster-apps.md | 2 +- .../helm-charts-in-rancher}/tutorial.md | 4 +- .../amazon-elb-load-balancer.md} | 6 +- .../ha-k3s-kubernetes-cluster.md} | 16 ++-- .../ha-rke1-kubernetes-cluster.md} | 10 +-- .../mysql-database-in-amazon-rds.md} | 2 +- .../nginx-load-balancer.md} | 0 .../nodes-in-amazon-ec2.md} | 8 +- .../high-availability-installs.md} | 4 +- .../k3s-for-rancher.md} | 6 +- 
.../rke1-for-rancher.md} | 16 ++-- .../recommended-cluster-architecture.md} | 6 +- .../roles-for-nodes-in-kubernetes.md} | 4 +- .../import-existing-clusters.md} | 16 ++-- .../about-rancher-agents.md} | 6 +- .../other-cloud-providers}/amazon.md | 6 +- .../other-cloud-providers}/azure.md | 0 .../google-compute-engine.md} | 0 .../other-cloud-providers}/vsphere.md | 4 +- .../create-a-digitalocean-cluster.md} | 16 ++-- .../create-an-amazon-ec2-cluster.md} | 20 ++--- .../create-an-azure-cluster.md} | 22 +++--- .../vsphere/create-credentials.md} | 8 +- ...ovision-kubernetes-clusters-in-vsphere.md} | 42 +++++----- .../azure-storageclass-configuration.md} | 2 +- .../network-requirements-for-host-gateway.md} | 0 .../use-windows-clusters/v2.1-v2.2.md} | 16 ++-- ...uirements-for-rancher-managed-clusters.md} | 20 ++--- .../aks.md | 0 .../alibaba.md} | 2 +- .../gke.md | 0 .../huawei.md} | 4 +- .../tencent.md} | 4 +- .../kubernetes-resources-setup}/configmaps.md | 6 +- .../create-services.md} | 0 .../encrypt-http-communication.md} | 2 +- .../horizontal-pod-autoscaler/about-hpas.md} | 2 +- .../hpa-for-rancher-before-2.0.7.md} | 0 .../manage-hpas-with-kubectl.md} | 4 +- .../manage-hpas-with-ui.md} | 4 +- .../test-hpas-with-kubectl.md} | 2 +- .../kubernetes-and-docker-registries.md} | 0 .../add-ingresses.md} | 4 +- .../layer-4-and-layer-7-load-balancing.md} | 0 .../kubernetes-resources-setup}/secrets.md | 6 +- .../workloads-and-pods}/add-a-sidecar.md | 2 +- .../workloads-and-pods}/deploy-workloads.md | 10 +-- .../roll-back-workloads.md} | 0 .../workloads-and-pods}/upgrade-workloads.md | 0 .../discover-services.md | 10 +-- .../expose-services.md | 12 +-- .../install-and-configure-rancher.md} | 34 ++++----- .../kubernetes-introduction.md} | 2 +- .../migrate-from-v1.6-v2.x}/load-balancing.md | 28 +++---- .../migrate-services.md} | 36 ++++----- .../migrate-from-v1.6-v2.x}/monitor-apps.md | 26 +++---- .../schedule-services.md} | 30 ++++---- .../advanced/helm2/rke-add-on/rke-add-on.md | 
19 ----- .../resources/chart-options/chart-options.md | 2 +- .../infrastructure-tutorials.md | 10 --- .../installation/resources/resources.md | 30 -------- .../k8s-in-rancher/k8s-in-rancher.md | 76 ------------------- .../about-authentication.md} | 38 +++++----- .../about-provisioning-drivers.md | 46 +++++++++++ .../about-rke1-templates.md} | 50 ++++++------ .../about-the-api.md} | 6 +- .../access-clusters.md} | 16 ++-- .../advanced-options.md} | 0 .../advanced-user-guides.md | 1 + .../air-gap-helm2.md | 10 +-- .../air-gapped-helm-cli-install.md} | 14 ++-- .../authentication-config.md | 1 + ...n-permissions-and-global-configuration.md} | 20 ++--- .../backup-restore-and-disaster-recovery.md} | 4 +- .../best-practices.md | 2 +- ...hecklist-for-production-ready-clusters.md} | 14 ++-- .../pages-for-subheaders/cis-scan-guides.md | 1 + .../cis-scans.md | 10 +-- .../cli-with-rancher.md} | 16 ++-- .../cluster-alerts.md | 18 ++--- .../cluster-configuration.md} | 24 +++--- .../cluster-logging.md | 16 ++-- .../cluster-monitoring.md | 12 +-- .../cluster-yml.md} | 0 ...e-microsoft-ad-federation-service-saml.md} | 6 +- .../configure-openldap.md} | 6 +- .../configure-shibboleth-saml.md} | 8 +- .../create-kubernetes-persistent-storage.md} | 18 ++--- .../creating-a-vsphere-cluster.md | 16 ++++ .../deploy-rancher-manager.md | 16 ++++ .../deploy-rancher-workloads.md | 9 +++ .../downstream-cluster-configuration.md | 1 + .../enable-experimental-features.md} | 8 +- .../helm-charts-in-rancher.md} | 20 ++--- .../helm-rancher.md | 26 +++---- .../helm2-create-nodes-lb.md} | 10 +-- .../helm2-helm-init.md} | 8 +- .../helm2-kubernetes-rke.md} | 14 ++-- .../helm2-rke-add-on-layer-4-lb.md} | 22 +++--- .../helm2-rke-add-on-layer-7-lb.md} | 22 +++--- .../helm2-rke-add-on-troubleshooting.md} | 10 +-- .../pages-for-subheaders/helm2-rke-add-on.md | 19 +++++ .../helm2 => pages-for-subheaders}/helm2.md | 24 +++--- .../horizontal-pod-autoscaler.md} | 10 +-- .../infrastructure-setup.md | 10 +++ 
.../install-cluster-autoscaler.md} | 2 +- ...nstall-upgrade-on-a-kubernetes-cluster.md} | 36 ++++----- .../installation-and-upgrade.md} | 38 +++++----- .../installation-references.md | 1 + .../installation-requirements.md} | 20 ++--- .../integrations-in-rancher.md | 1 + .../pages-for-subheaders/introduction.md | 1 + .../pages-for-subheaders/istio-setup-guide.md | 26 +++++++ .../istio => pages-for-subheaders}/istio.md | 10 +-- .../kubernetes-cluster-setup.md} | 0 .../kubernetes-clusters-in-rancher-setup.md} | 26 +++---- .../kubernetes-components.md | 18 +++++ .../kubernetes-resources-setup.md | 76 +++++++++++++++++++ .../launch-kubernetes-with-rancher.md} | 12 +-- .../load-balancer-and-ingress-controller.md} | 14 ++-- .../manage-clusters.md} | 8 +- .../manage-persistent-storage.md | 1 + .../manage-project-resource-quotas.md} | 8 +- .../manage-projects.md} | 18 ++--- .../manage-role-based-access-control-rbac.md} | 6 +- .../migrate-from-v1.6-v2.x.md} | 18 ++--- .../pages-for-subheaders/new-user-guides.md | 1 + .../node-template-configuration.md | 1 + .../other-cloud-providers.md | 1 + .../other-installation-methods.md | 4 +- .../other-troubleshooting-tips.md | 1 + .../pipelines.md | 16 ++-- .../project-tools.md} | 12 +-- .../provisioning-storage-examples.md} | 6 +- .../quick-start-guides.md} | 8 +- .../rancher-behind-an-http-proxy.md} | 8 +- .../rancher-manager-architecture.md} | 28 +++---- .../rancher-on-a-single-node-with-docker.md} | 26 +++---- .../rancher-security.md} | 30 ++++---- .../rancher-server-configuration.md | 1 + .../rancher-v2.1-hardening-guides.md} | 4 +- .../rancher-v2.2-hardening-guides.md} | 4 +- .../rancher-v2.3-hardening-guides.md} | 6 +- .../rancher-v2.4-hardening-guides.md} | 4 +- .../pages-for-subheaders/resources.md | 30 ++++++++ .../pages-for-subheaders/rke-add-on.md | 1 + .../set-up-cloud-providers.md} | 14 ++-- ...sters-from-hosted-kubernetes-providers.md} | 12 +-- .../single-node-rancher-in-docker.md | 1 + .../upgrades.md | 36 ++++----- 
.../use-existing-nodes.md} | 18 ++--- .../use-new-nodes-in-an-infra-provider.md} | 10 +-- .../use-windows-clusters.md} | 28 +++---- .../pages-for-subheaders/user-settings.md | 18 +++++ .../vsphere.md | 10 +-- .../workloads-and-pods.md} | 6 +- .../deployment/deployment.md | 16 ---- .../quick-start-guide/workload/workload.md | 9 --- .../version-2.0-2.4/reference-guides.md | 1 + .../about-the-api}/api-tokens.md | 0 .../best-practices}/containers.md | 0 .../best-practices}/deployment-strategies.md | 4 +- .../best-practices}/deployment-types.md | 6 +- .../best-practices}/management.md | 12 +-- .../cli-with-rancher/kubectl-utility.md | 1 + .../cli-with-rancher/rancher-cli.md | 1 + .../amazon-ec2.md} | 22 +++--- .../node-template-configuration/azure.md} | 0 .../digitalocean.md} | 0 .../vsphere/prior-to-v2.0.4.md} | 6 +- .../vsphere}/v2.0.4.md | 4 +- .../vsphere}/v2.2.0.md | 6 +- .../vsphere}/v2.3.0.md | 6 +- .../vsphere}/v2.3.3.md | 4 +- .../rke1-cluster-configuration.md} | 60 +++++++-------- .../rancher-agent-options.md} | 4 +- .../openldap-config-reference.md} | 6 +- .../amazon-eks-permissions.md} | 6 +- .../installation-references/feature-flags.md | 1 + .../helm-chart-options.md} | 20 ++--- .../installation-references}/tls-settings.md | 4 +- .../kubernetes-concepts.md} | 2 +- .../pipelines}/concepts.md | 0 .../pipelines/configure-persistent-data.md} | 4 +- .../pipelines/example-repositories.md} | 4 +- .../pipelines/example-yaml.md} | 2 +- .../pipelines/pipeline-configuration.md} | 12 +-- .../pipelines/v2.0.x.md} | 2 +- .../rancher-cluster-tools.md} | 18 ++--- .../architecture-recommendations.md | 20 ++--- ...unicating-with-downstream-user-clusters.md | 1 + .../rancher-server-and-components.md | 1 + .../rancher-project-tools}/project-alerts.md | 16 ++-- .../rancher-project-tools}/project-logging.md | 16 ++-- ...ardening-guide-with-cis-v1.3-benchmark.md} | 2 +- ...sessment-guide-with-cis-v1.3-benchmark.md} | 0 ...ardening-guide-with-cis-v1.4-benchmark.md} | 2 +- 
...sessment-guide-with-cis-v1.4-benchmark.md} | 0 ...dening-guide-with-cis-v1.4.1-benchmark.md} | 2 +- ...ssment-guide-with-cis-v1.4.1-benchmark.md} | 0 ...dening-guide-with-cis-v1.4.1-benchmark.md} | 2 +- ...ssment-guide-with-cis-v1.4.1-benchmark.md} | 0 ...ardening-guide-with-cis-v1.5-benchmark.md} | 2 +- ...sessment-guide-with-cis-v1.5-benchmark.md} | 0 ...ardening-guide-with-cis-v1.5-benchmark.md} | 2 +- ...sessment-guide-with-cis-v1.5-benchmark.md} | 0 .../security-advisories-and-cves.md} | 2 +- .../rke1-template-example-yaml.md} | 0 .../advanced-options.md} | 6 +- .../http-proxy-configuration.md} | 2 +- .../system-tools.md | 10 +-- .../user-settings}/api-keys.md | 4 +- .../manage-cloud-credentials.md} | 12 +-- .../user-settings/manage-node-templates.md} | 10 +-- .../user-settings/user-preferences.md} | 0 .../migration-tools-cli-reference.md} | 0 .../rancher-v2.3.0/rancher-v2.3.0.md | 4 +- .../rancher-v2.3.3/rancher-v2.3.3.md | 4 +- .../rancher-v2.3.5/rancher-v2.3.5.md | 4 +- .../security/security-scan/security-scan.md | 2 +- .../{troubleshooting => }/troubleshooting.md | 18 ++--- .../kubernetes-components.md | 18 ----- ... 
=> troubleshooting-controlplane-nodes.md} | 2 +- .../etcd.md => troubleshooting-etcd-nodes.md} | 0 ...roxy.md => troubleshooting-nginx-proxy.md} | 0 ...ng-worker-nodes-and-generic-components.md} | 0 .../dns.md | 4 +- .../kubernetes-resources.md | 4 +- .../logging.md | 0 .../networking.md | 6 +- .../rancher-ha.md} | 0 .../registered-clusters.md} | 0 .../user-settings/user-settings.md | 18 ----- 432 files changed, 1995 insertions(+), 1961 deletions(-) delete mode 100644 versioned_docs/version-2.0-2.4/admin-settings/drivers/drivers.md delete mode 100644 versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/setup.md delete mode 100644 versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/vsphere-node-template-config.md rename versioned_docs/version-2.0-2.4/{contributing/contributing.md => contribute-to-rancher.md} (99%) create mode 100644 versioned_docs/version-2.0-2.4/explanations.md rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cis-scans/skipped-tests/skipped-tests.md => explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-alerts/default-alerts => explanations/integrations-in-rancher/cluster-alerts}/default-alerts.md (91%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-logging/elasticsearch => explanations/integrations-in-rancher/cluster-logging}/elasticsearch.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-logging/fluentd => explanations/integrations-in-rancher/cluster-logging}/fluentd.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-logging/kafka => explanations/integrations-in-rancher/cluster-logging}/kafka.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-logging/splunk => explanations/integrations-in-rancher/cluster-logging}/splunk.md (96%) rename 
versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-logging/syslog => explanations/integrations-in-rancher/cluster-logging}/syslog.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring/cluster-metrics => explanations/integrations-in-rancher/cluster-monitoring}/cluster-metrics.md (84%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring/custom-metrics => explanations/integrations-in-rancher/cluster-monitoring}/custom-metrics.md (98%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring/expression => explanations/integrations-in-rancher/cluster-monitoring}/expression.md (99%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring/project-monitoring => explanations/integrations-in-rancher/cluster-monitoring}/project-monitoring.md (69%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring/prometheus => explanations/integrations-in-rancher/cluster-monitoring}/prometheus.md (92%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring/viewing-metrics => explanations/integrations-in-rancher/cluster-monitoring}/viewing-metrics.md (82%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/resources/resources.md => explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/disabling-istio/disabling-istio.md => explanations/integrations-in-rancher/istio/disable-istio.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/rbac/rbac.md => explanations/integrations-in-rancher/istio/rbac-for-istio.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/release-notes => explanations/integrations-in-rancher/istio}/release-notes.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/notifiers => explanations/integrations-in-rancher}/notifiers.md (95%) rename 
versioned_docs/version-2.0-2.4/{cluster-admin/tools/opa-gatekeeper => explanations/integrations-in-rancher}/opa-gatekeeper.md (98%) rename versioned_docs/version-2.0-2.4/{faq => }/faq.md (88%) rename versioned_docs/version-2.0-2.4/faq/{networking/cni-providers/cni-providers.md => container-network-interface-providers.md} (85%) rename versioned_docs/version-2.0-2.4/faq/{kubectl/kubectl.md => install-and-configure-kubectl.md} (100%) create mode 100644 versioned_docs/version-2.0-2.4/faq/networking.md delete mode 100644 versioned_docs/version-2.0-2.4/faq/networking/networking.md rename versioned_docs/version-2.0-2.4/faq/{removing-rancher/removing-rancher.md => rancher-is-no-longer-needed.md} (73%) rename versioned_docs/version-2.0-2.4/faq/{security => }/security.md (60%) rename versioned_docs/version-2.0-2.4/faq/{technical/technical.md => technical-items.md} (87%) rename versioned_docs/version-2.0-2.4/faq/{telemetry => }/telemetry.md (100%) rename versioned_docs/version-2.0-2.4/faq/{upgrades-to-2x => }/upgrades-to-2x.md (100%) create mode 100644 versioned_docs/version-2.0-2.4/getting-started.md rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/air-gap-helm2/install-rancher => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2}/install-rancher.md (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/air-gap-helm2/launch-kubernetes => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2}/launch-kubernetes.md (79%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/air-gap-helm2/populate-private-registry => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2}/populate-private-registry.md (92%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/air-gap-helm2/prepare-nodes => 
getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2}/prepare-nodes.md (85%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/3-node-certificate-recognizedca.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md} (98%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/cluster-yml-templates/3-node-certificate/3-node-certificate.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md} (98%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/3-node-externalssl-certificate.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md} (98%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/3-node-externalssl-recognizedca.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md} (98%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md} (87%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/api-audit-log/api-audit-log.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md} (98%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/create-nodes-lb/nginx => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb}/nginx.md (96%) rename 
versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/create-nodes-lb/nlb => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb}/nlb.md (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/helm-init/troubleshooting => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init}/troubleshooting.md (85%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/helm-rancher/chart-options => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher}/chart-options.md (91%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/helm-rancher/tls-secrets => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher}/tls-secrets.md (100%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/helm-rancher/troubleshooting => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher}/troubleshooting.md (100%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/kubernetes-rke/troubleshooting => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/kubernetes-rke}/troubleshooting.md (100%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/api-auditing => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on}/api-auditing.md (85%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb}/nlb.md (87%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb => 
getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb}/alb.md (92%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb}/nginx.md (71%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/proxy => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on}/proxy.md (82%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting}/404-default-backend.md (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting}/generic-troubleshooting.md (88%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting}/job-complete-status.md (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/firewall/firewall.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md} (96%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/rke-add-on/layer-4-lb => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on}/layer-4-lb.md (94%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/rke-add-on/layer-7-lb => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on}/layer-7-lb.md (89%) rename 
versioned_docs/version-2.0-2.4/{installation/resources/advanced/etcd/etcd.md => getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md} (100%) rename versioned_docs/version-2.0-2.4/{installation/resources/feature-flags/istio-virtual-service-ui/istio-virtual-service-ui.md => getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md} (91%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/arm64-platform/arm64-platform.md => getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md} (81%) rename versioned_docs/version-2.0-2.4/{installation/resources/feature-flags/enable-not-default-storage-drivers/enable-not-default-storage-drivers.md => getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md} (92%) rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/rollbacks => getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster}/rollbacks.md (89%) rename versioned_docs/version-2.0-2.4/{installation/resources/troubleshooting => getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster}/troubleshooting.md (100%) rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/upgrades/helm2 => getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades}/helm2.md (75%) rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on => getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades}/migrating-from-rke-add-on.md (89%) rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/upgrades/namespace-migration => getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades}/namespace-migration.md 
(93%) rename versioned_docs/version-2.0-2.4/{installation/requirements/installing-docker/installing-docker.md => getting-started/installation-and-upgrade/installation-requirements/install-docker.md} (100%) rename versioned_docs/version-2.0-2.4/{installation/requirements/ports/ports.md => getting-started/installation-and-upgrade/installation-requirements/port-requirements.md} (90%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md => getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md} (86%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md => getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md} (86%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/air-gap/install-rancher/install-rancher.md => getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md} (90%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md => getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md} (92%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/behind-proxy/launch-kubernetes/launch-kubernetes.md => getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md} (87%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/behind-proxy/install-rancher => getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy}/install-rancher.md (82%) rename 
versioned_docs/version-2.0-2.4/{installation/other-installation-methods/behind-proxy/prepare-nodes/prepare-nodes.md => getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md} (88%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/single-node-docker/troubleshooting/troubleshooting.md => getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md} (100%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md => getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md} (89%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md => getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md} (83%) rename versioned_docs/version-2.0-2.4/{installation/resources/tls-secrets/tls-secrets.md => getting-started/installation-and-upgrade/resources/add-tls-secrets.md} (83%) rename versioned_docs/version-2.0-2.4/{installation/resources/choosing-version/choosing-version.md => getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md} (87%) rename versioned_docs/version-2.0-2.4/{installation/resources/custom-ca-root-certificate/custom-ca-root-certificate.md => getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md} (80%) rename versioned_docs/version-2.0-2.4/{installation/resources/helm-version/helm-version.md => getting-started/installation-and-upgrade/resources/helm-version-requirements.md} (78%) rename versioned_docs/version-2.0-2.4/{installation/resources/local-system-charts => 
getting-started/installation-and-upgrade/resources}/local-system-charts.md (87%) rename versioned_docs/version-2.0-2.4/{installation/resources/update-rancher-cert/update-rancher-cert.md => getting-started/installation-and-upgrade/resources/update-rancher-certificate.md} (96%) rename versioned_docs/version-2.0-2.4/{installation/resources/upgrading-cert-manager/helm-2-instructions/helm-2-instructions.md => getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md} (96%) rename versioned_docs/version-2.0-2.4/{installation/resources/upgrading-cert-manager/upgrading-cert-manager.md => getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md} (96%) rename versioned_docs/version-2.0-2.4/{cluster-admin/upgrading-kubernetes/upgrading-kubernetes.md => getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md} (83%) rename versioned_docs/version-2.0-2.4/{admin-settings/k8s-metadata/k8s-metadata.md => getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md} (88%) rename versioned_docs/version-2.0-2.4/{overview => getting-started/introduction}/overview.md (63%) create mode 100644 versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md rename versioned_docs/version-2.0-2.4/{quick-start-guide/cli => getting-started/quick-start-guides}/cli.md (57%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md => getting-started/quick-start-guides/deploy-rancher-manager/aws.md} (94%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md => getting-started/quick-start-guides/deploy-rancher-manager/azure.md} (95%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md => getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md} (94%) rename 
versioned_docs/version-2.0-2.4/{quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md => getting-started/quick-start-guides/deploy-rancher-manager/gcp.md} (95%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md => getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md} (90%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md => getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md} (91%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/workload/quickstart-deploy-workload-nodeport/quickstart-deploy-workload-nodeport.md => getting-started/quick-start-guides/deploy-workloads/nodeports.md} (87%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/workload/quickstart-deploy-workload-ingress/quickstart-deploy-workload-ingress.md => getting-started/quick-start-guides/deploy-workloads/workload-ingress.md} (85%) create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides.md rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/ad/ad.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md} (96%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/azure-ad/azure-ad.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md} (90%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/freeipa/freeipa.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md} (95%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/github/github.md => 
how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md} (93%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/google/google.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md} (90%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/keycloak/keycloak.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md} (95%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/okta/okta.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md} (100%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/ping-federate/ping-federate.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md} (100%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/local/local.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md} (100%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/user-groups/user-groups.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md} (95%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/microsoft-adfs-setup.md => 
how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md} (75%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/rancher-adfs-setup.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md} (93%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/shibboleth/about/about.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md} (95%) rename versioned_docs/version-2.0-2.4/{admin-settings/drivers/cluster-drivers/cluster-drivers.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md} (66%) rename versioned_docs/version-2.0-2.4/{admin-settings/drivers/node-drivers/node-drivers.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md} (86%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/template-access-and-sharing/template-access-and-sharing.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md} (96%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/applying-templates/applying-templates.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md} (78%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/creator-permissions => 
how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates}/creator-permissions.md (91%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/enforcement/enforcement.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md} (67%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/example-scenarios/example-scenarios.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md} (74%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/rke-templates-and-hardware/rke-templates-and-hardware.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md} (90%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/creating-and-revising/creating-and-revising.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md} (89%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/overrides/overrides.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md} (75%) rename versioned_docs/version-2.0-2.4/{admin-settings/pod-security-policies/pod-security-policies.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md} (96%) rename versioned_docs/version-2.0-2.4/{admin-settings/config-private-registry/config-private-registry.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md} (85%) rename versioned_docs/version-2.0-2.4/{admin-settings/rbac/cluster-project-roles/cluster-project-roles.md => 
how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md} (95%) rename versioned_docs/version-2.0-2.4/{admin-settings/rbac/default-custom-roles/default-custom-roles.md => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md} (93%) rename versioned_docs/version-2.0-2.4/{admin-settings/rbac/global-permissions => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac}/global-permissions.md (94%) rename versioned_docs/version-2.0-2.4/{admin-settings/rbac/locked-roles => how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac}/locked-roles.md (93%) create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md create mode 100644 versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md create mode 100644 
versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/enable-istio-with-psp.md => how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md} (93%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/enable-istio-in-cluster => how-to-guides/advanced-user-guides/istio-setup-guide}/enable-istio-in-cluster.md (56%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/enable-istio-in-namespace => how-to-guides/advanced-user-guides/istio-setup-guide}/enable-istio-in-namespace.md (96%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/view-traffic/view-traffic.md => how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/node-selectors => how-to-guides/advanced-user-guides/istio-setup-guide}/node-selectors.md (91%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/gateway/gateway.md => how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md} (93%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/set-up-traffic-management => how-to-guides/advanced-user-guides/istio-setup-guide}/set-up-traffic-management.md (96%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio/setup/deploy-workloads/deploy-workloads.md => how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md} (98%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-access/cluster-members/cluster-members.md => how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md} (64%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-access/ace/ace.md => 
how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md} (71%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-access/kubectl/kubectl.md => how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md} (89%) rename versioned_docs/version-2.0-2.4/{cluster-admin/pod-security-policy/pod-security-policy.md => how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md} (58%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/options/pod-security-policies/pod-security-policies.md => how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md} (91%) rename versioned_docs/version-2.0-2.4/{cluster-admin/backing-up-etcd => how-to-guides/advanced-user-guides/manage-clusters}/backing-up-etcd.md (90%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md => how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md} (97%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cloning-clusters/cloning-clusters.md => how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md} (81%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/glusterfs-volumes/glusterfs-volumes.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md} (93%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/how-storage-works/how-storage-works.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md} (98%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/provisioning-new-storage/provisioning-new-storage.md => 
how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md} (90%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/iscsi-volumes/iscsi-volumes.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md} (94%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/attaching-existing-storage/attaching-existing-storage.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md} (92%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/examples/nfs/nfs.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md} (97%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/examples/ebs/ebs.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md} (79%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/examples/vsphere/vsphere.md => how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md} (82%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-autoscaler/amazon/amazon.md => how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md} (97%) rename versioned_docs/version-2.0-2.4/{cluster-admin/nodes/nodes.md => how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md} (82%) rename versioned_docs/version-2.0-2.4/{cluster-admin/projects-and-namespaces => how-to-guides/advanced-user-guides/manage-clusters}/projects-and-namespaces.md (83%) rename 
versioned_docs/version-2.0-2.4/{cluster-admin/restoring-etcd => how-to-guides/advanced-user-guides/manage-clusters}/restoring-etcd.md (67%) rename versioned_docs/version-2.0-2.4/{cluster-admin/certificate-rotation/certificate-rotation.md => how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md} (100%) rename versioned_docs/version-2.0-2.4/{project-admin/project-members/project-members.md => how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md} (80%) rename versioned_docs/version-2.0-2.4/{project-admin/pipelines/pipelines.md => how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md} (87%) rename versioned_docs/version-2.0-2.4/{project-admin/namespaces/namespaces.md => how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md} (66%) rename versioned_docs/version-2.0-2.4/{project-admin/pod-security-policies/pod-security-policies.md => how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md} (82%) rename versioned_docs/version-2.0-2.4/{project-admin/resource-quotas/quotas-for-projects/quotas-for-projects.md => how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md} (95%) rename versioned_docs/version-2.0-2.4/{project-admin/resource-quotas/override-namespace-default/override-namespace-default.md => how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md} (66%) rename versioned_docs/version-2.0-2.4/{project-admin/resource-quotas/quota-type-reference/quota-type-reference.md => how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md} (100%) rename versioned_docs/version-2.0-2.4/{project-admin/resource-quotas/override-container-default/override-container-default.md => how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md} (96%) rename 
versioned_docs/version-2.0-2.4/{backups/backup/docker-backups/docker-backups.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md} (95%) rename versioned_docs/version-2.0-2.4/{backups/backup/k3s-backups/k3s-backups.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md} (96%) rename versioned_docs/version-2.0-2.4/{backups/backup/rke-backups/rke-backups.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md} (99%) rename versioned_docs/version-2.0-2.4/{backups/restore/docker-restores/docker-restores.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md} (85%) rename versioned_docs/version-2.0-2.4/{backups/restore/k3s-restore/k3s-restore.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md} (100%) rename versioned_docs/version-2.0-2.4/{backups/restore/rke-restore/rke-restore.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md} (91%) rename versioned_docs/version-2.0-2.4/{backups/restore/rke-restore/v2.0-v2.1/v2.0-v2.1.md => how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md} (97%) rename versioned_docs/version-2.0-2.4/{deploy-across-clusters/deploy-across-clusters.md => how-to-guides/new-user-guides/deploy-apps-across-clusters.md} (82%) rename versioned_docs/version-2.0-2.4/{helm-charts/adding-catalogs => how-to-guides/new-user-guides/helm-charts-in-rancher}/adding-catalogs.md (61%) rename versioned_docs/version-2.0-2.4/{helm-charts/built-in => how-to-guides/new-user-guides/helm-charts-in-rancher}/built-in.md (71%) rename versioned_docs/version-2.0-2.4/{helm-charts/catalog-config => 
how-to-guides/new-user-guides/helm-charts-in-rancher}/catalog-config.md (87%) rename versioned_docs/version-2.0-2.4/{helm-charts/creating-apps => how-to-guides/new-user-guides/helm-charts-in-rancher}/creating-apps.md (97%) rename versioned_docs/version-2.0-2.4/{helm-charts/globaldns => how-to-guides/new-user-guides/helm-charts-in-rancher}/globaldns.md (87%) rename versioned_docs/version-2.0-2.4/{helm-charts/launching-apps => how-to-guides/new-user-guides/helm-charts-in-rancher}/launching-apps.md (83%) rename versioned_docs/version-2.0-2.4/{helm-charts/managing-apps => how-to-guides/new-user-guides/helm-charts-in-rancher}/managing-apps.md (100%) rename versioned_docs/version-2.0-2.4/{helm-charts/multi-cluster-apps => how-to-guides/new-user-guides/helm-charts-in-rancher}/multi-cluster-apps.md (59%) rename versioned_docs/version-2.0-2.4/{helm-charts/tutorial => how-to-guides/new-user-guides/helm-charts-in-rancher}/tutorial.md (91%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md => how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md} (96%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/infra-for-ha-with-external-db.md => how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md} (86%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/infra-for-ha.md => how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md} (91%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/infrastructure-tutorials/rds/rds.md => how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md} (95%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/nginx.md => 
how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md} (100%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ec2-node.md => how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md} (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/how-ha-works/how-ha-works.md => how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md} (89%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/ha-with-external-db/ha-with-external-db.md => how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md} (91%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/ha-RKE/ha-RKE.md => how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md} (89%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/production/recommended-architecture/recommended-architecture.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md} (83%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/production/nodes-and-roles/nodes-and-roles.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md} (94%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/imported-clusters/imported-clusters.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md} (90%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/rancher-agents/rancher-agents.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md} (72%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/cloud-providers/amazon => 
how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers}/amazon.md (90%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/cloud-providers/azure => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers}/azure.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/cloud-providers/gce/gce.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/cloud-providers/vsphere => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers}/vsphere.md (76%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md} (73%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md} (81%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/azure/azure.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md} (73%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/creating-credentials.md => 
how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md} (88%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/provisioning-vsphere-clusters.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md} (64%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/azure-storageclass.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md} (90%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/host-gateway-requirements.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/docs-for-2.1-and-2.2.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md} (85%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/node-requirements/node-requirements.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md} (79%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/aks => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers}/aks.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/ack/ack.md => 
how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md} (82%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/gke => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers}/gke.md (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/cce/cce.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md} (89%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/tke/tke.md => how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md} (88%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/configmaps => how-to-guides/new-user-guides/kubernetes-resources-setup}/configmaps.md (90%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/service-discovery/service-discovery.md => how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md} (100%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/certificates/certificates.md => how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md} (97%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/hpa-background.md => how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md} (97%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/hpa-for-rancher-before-2_0_7.md => how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md} (100%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md => 
how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md} (98%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md => how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md} (84%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md => how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md} (99%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/registries/registries.md => how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md} (100%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md => how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md} (96%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/load-balancers-and-ingress/load-balancers/load-balancers.md => how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md} (100%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/secrets => how-to-guides/new-user-guides/kubernetes-resources-setup}/secrets.md (89%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/workloads/add-a-sidecar => how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods}/add-a-sidecar.md (98%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/workloads/deploy-workloads => how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods}/deploy-workloads.md (81%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/workloads/rollback-workloads/rollback-workloads.md => how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md} (100%) rename 
versioned_docs/version-2.0-2.4/{k8s-in-rancher/workloads/upgrade-workloads => how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods}/upgrade-workloads.md (100%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/discover-services => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x}/discover-services.md (90%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/expose-services => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x}/expose-services.md (95%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/get-started/get-started.md => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md} (67%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/kub-intro/kub-intro.md => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md} (97%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/load-balancing => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x}/load-balancing.md (87%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/run-migration-tool/run-migration-tool.md => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md} (87%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/monitor-apps => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x}/monitor-apps.md (86%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/schedule-workloads/schedule-workloads.md => how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md} (91%) delete mode 100644 versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/rke-add-on.md delete mode 100644 versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infrastructure-tutorials.md delete mode 100644 versioned_docs/version-2.0-2.4/installation/resources/resources.md delete mode 100644 versioned_docs/version-2.0-2.4/k8s-in-rancher/k8s-in-rancher.md rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/authentication.md => 
pages-for-subheaders/about-authentication.md} (65%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/rke-templates.md => pages-for-subheaders/about-rke1-templates.md} (53%) rename versioned_docs/version-2.0-2.4/{api/api.md => pages-for-subheaders/about-the-api.md} (87%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-access/cluster-access.md => pages-for-subheaders/access-clusters.md} (55%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/advanced.md => pages-for-subheaders/advanced-options.md} (100%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/air-gap-helm2 => pages-for-subheaders}/air-gap-helm2.md (76%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/air-gap/air-gap.md => pages-for-subheaders/air-gapped-helm-cli-install.md} (56%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md rename versioned_docs/version-2.0-2.4/{admin-settings/admin-settings.md => pages-for-subheaders/authentication-permissions-and-global-configuration.md} (65%) rename versioned_docs/version-2.0-2.4/{backups/backups.md => pages-for-subheaders/backup-restore-and-disaster-recovery.md} (79%) rename versioned_docs/version-2.0-2.4/{best-practices => pages-for-subheaders}/best-practices.md (95%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/production/production.md => pages-for-subheaders/checklist-for-production-ready-clusters.md} (68%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cis-scans => pages-for-subheaders}/cis-scans.md (90%) rename versioned_docs/version-2.0-2.4/{cli/cli.md => pages-for-subheaders/cli-with-rancher.md} (77%) 
rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-alerts => pages-for-subheaders}/cluster-alerts.md (91%) rename versioned_docs/version-2.0-2.4/{cluster-admin/editing-clusters/editing-clusters.md => pages-for-subheaders/cluster-configuration.md} (63%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-logging => pages-for-subheaders}/cluster-logging.md (85%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/cluster-monitoring => pages-for-subheaders}/cluster-monitoring.md (83%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/cluster-yml-templates/cluster-yml-templates.md => pages-for-subheaders/cluster-yml.md} (100%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/microsoft-adfs/microsoft-adfs.md => pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md} (65%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/openldap/openldap.md => pages-for-subheaders/configure-openldap.md} (92%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/shibboleth/shibboleth.md => pages-for-subheaders/configure-shibboleth-saml.md} (92%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/volumes-and-storage.md => pages-for-subheaders/create-kubernetes-persistent-storage.md} (59%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md rename versioned_docs/version-2.0-2.4/{installation/resources/feature-flags/feature-flags.md => pages-for-subheaders/enable-experimental-features.md} (91%) rename versioned_docs/version-2.0-2.4/{helm-charts/helm-charts.md => 
pages-for-subheaders/helm-charts-in-rancher.md} (83%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/helm-rancher => pages-for-subheaders}/helm-rancher.md (82%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/create-nodes-lb/create-nodes-lb.md => pages-for-subheaders/helm2-create-nodes-lb.md} (74%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/helm-init/helm-init.md => pages-for-subheaders/helm2-helm-init.md} (87%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/kubernetes-rke/kubernetes-rke.md => pages-for-subheaders/helm2-kubernetes-rke.md} (84%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/layer-4-lb/layer-4-lb.md => pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md} (94%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/layer-7-lb/layer-7-lb.md => pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md} (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2/rke-add-on/troubleshooting/troubleshooting.md => pages-for-subheaders/helm2-rke-add-on-troubleshooting.md} (62%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md rename versioned_docs/version-2.0-2.4/{installation/resources/advanced/helm2 => pages-for-subheaders}/helm2.md (66%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/horitzontal-pod-autoscaler/horitzontal-pod-autoscaler.md => pages-for-subheaders/horizontal-pod-autoscaler.md} (70%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-autoscaler/cluster-autoscaler.md => pages-for-subheaders/install-cluster-autoscaler.md} (91%) rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/install-rancher-on-k8s.md => 
pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md} (81%) rename versioned_docs/version-2.0-2.4/{installation/installation.md => pages-for-subheaders/installation-and-upgrade.md} (53%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md rename versioned_docs/version-2.0-2.4/{installation/requirements/requirements.md => pages-for-subheaders/installation-requirements.md} (84%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/istio => pages-for-subheaders}/istio.md (90%) rename versioned_docs/version-2.0-2.4/{installation/resources/k8s-tutorials/k8s-tutorials.md => pages-for-subheaders/kubernetes-cluster-setup.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/cluster-provisioning.md => pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md} (75%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/rke-clusters.md => pages-for-subheaders/launch-kubernetes-with-rancher.md} (61%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/load-balancers-and-ingress/load-balancers-and-ingress.md => pages-for-subheaders/load-balancer-and-ingress-controller.md} (77%) rename versioned_docs/version-2.0-2.4/{cluster-admin/cluster-admin.md => pages-for-subheaders/manage-clusters.md} (67%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md rename versioned_docs/version-2.0-2.4/{project-admin/resource-quotas/resource-quotas.md => 
pages-for-subheaders/manage-project-resource-quotas.md} (84%) rename versioned_docs/version-2.0-2.4/{project-admin/project-admin.md => pages-for-subheaders/manage-projects.md} (53%) rename versioned_docs/version-2.0-2.4/{admin-settings/rbac/rbac.md => pages-for-subheaders/manage-role-based-access-control-rbac.md} (72%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/v1.6-migration.md => pages-for-subheaders/migrate-from-v1.6-v2.x.md} (69%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods => pages-for-subheaders}/other-installation-methods.md (57%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md rename versioned_docs/version-2.0-2.4/{pipelines => pages-for-subheaders}/pipelines.md (92%) rename versioned_docs/version-2.0-2.4/{project-admin/tools/tools.md => pages-for-subheaders/project-tools.md} (57%) rename versioned_docs/version-2.0-2.4/{cluster-admin/volumes-and-storage/examples/examples.md => pages-for-subheaders/provisioning-storage-examples.md} (57%) rename versioned_docs/version-2.0-2.4/{quick-start-guide/quick-start-guide.md => pages-for-subheaders/quick-start-guides.md} (57%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/behind-proxy/behind-proxy.md => pages-for-subheaders/rancher-behind-an-http-proxy.md} (50%) rename versioned_docs/version-2.0-2.4/{overview/architecture/architecture.md => pages-for-subheaders/rancher-manager-architecture.md} (84%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/single-node-docker/single-node-docker.md => pages-for-subheaders/rancher-on-a-single-node-with-docker.md} (81%) rename 
versioned_docs/version-2.0-2.4/{security/security.md => pages-for-subheaders/rancher-security.md} (70%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md rename versioned_docs/version-2.0-2.4/{security/rancher-2.1/rancher-2.1.md => pages-for-subheaders/rancher-v2.1-hardening-guides.md} (58%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.2/rancher-2.2.md => pages-for-subheaders/rancher-v2.2-hardening-guides.md} (59%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-2.3.x.md => pages-for-subheaders/rancher-v2.3-hardening-guides.md} (51%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.4/rancher-2.4.md => pages-for-subheaders/rancher-v2.4-hardening-guides.md} (58%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/cloud-providers/cloud-providers.md => pages-for-subheaders/set-up-cloud-providers.md} (60%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/hosted-kubernetes-clusters.md => pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md} (67%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/upgrades => pages-for-subheaders}/upgrades.md (78%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/custom-nodes/custom-nodes.md => pages-for-subheaders/use-existing-nodes.md} (70%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/node-pools.md => pages-for-subheaders/use-new-nodes-in-an-infra-provider.md} (87%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/windows-clusters/windows-clusters.md => 
pages-for-subheaders/use-windows-clusters.md} (81%) create mode 100644 versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere => pages-for-subheaders}/vsphere.md (72%) rename versioned_docs/version-2.0-2.4/{k8s-in-rancher/workloads/workloads.md => pages-for-subheaders/workloads-and-pods.md} (92%) delete mode 100644 versioned_docs/version-2.0-2.4/quick-start-guide/deployment/deployment.md delete mode 100644 versioned_docs/version-2.0-2.4/quick-start-guide/workload/workload.md create mode 100644 versioned_docs/version-2.0-2.4/reference-guides.md rename versioned_docs/version-2.0-2.4/{api/api-tokens => reference-guides/about-the-api}/api-tokens.md (100%) rename versioned_docs/version-2.0-2.4/{best-practices/containers => reference-guides/best-practices}/containers.md (100%) rename versioned_docs/version-2.0-2.4/{best-practices/deployment-strategies => reference-guides/best-practices}/deployment-strategies.md (93%) rename versioned_docs/version-2.0-2.4/{best-practices/deployment-types => reference-guides/best-practices}/deployment-types.md (79%) rename versioned_docs/version-2.0-2.4/{best-practices/management => reference-guides/best-practices}/management.md (93%) create mode 100644 versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md create mode 100644 versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ec2-node-template-config.md => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md} (58%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/azure-node-template-config.md => 
reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/do-node-template-config.md => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md} (100%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/prior-to-2.0.4.md => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md} (92%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4 => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere}/v2.0.4.md (92%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0 => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere}/v2.2.0.md (90%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0 => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere}/v2.3.0.md (90%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3 => reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere}/v2.3.3.md (96%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/options/options.md => reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md} (67%) rename 
versioned_docs/version-2.0-2.4/{cluster-provisioning/rke-clusters/custom-nodes/agent-options/agent-options.md => reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md} (93%) rename versioned_docs/version-2.0-2.4/{admin-settings/authentication/openldap/openldap-config/openldap-config.md => reference-guides/configure-openldap/openldap-config-reference.md} (90%) rename versioned_docs/version-2.0-2.4/{cluster-provisioning/hosted-kubernetes-clusters/eks/eks.md => reference-guides/installation-references/amazon-eks-permissions.md} (98%) create mode 100644 versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md rename versioned_docs/version-2.0-2.4/{installation/install-rancher-on-k8s/chart-options/chart-options.md => reference-guides/installation-references/helm-chart-options.md} (92%) rename versioned_docs/version-2.0-2.4/{installation/resources/tls-settings => reference-guides/installation-references}/tls-settings.md (88%) rename versioned_docs/version-2.0-2.4/{overview/concepts/concepts.md => reference-guides/kubernetes-concepts.md} (98%) rename versioned_docs/version-2.0-2.4/{pipelines/concepts => reference-guides/pipelines}/concepts.md (100%) rename versioned_docs/version-2.0-2.4/{pipelines/storage/storage.md => reference-guides/pipelines/configure-persistent-data.md} (93%) rename versioned_docs/version-2.0-2.4/{pipelines/example-repos/example-repos.md => reference-guides/pipelines/example-repositories.md} (90%) rename versioned_docs/version-2.0-2.4/{pipelines/example/example.md => reference-guides/pipelines/example-yaml.md} (90%) rename versioned_docs/version-2.0-2.4/{pipelines/config/config.md => reference-guides/pipelines/pipeline-configuration.md} (94%) rename versioned_docs/version-2.0-2.4/{pipelines/docs-for-v2.0.x/docs-for-v2.0.x.md => reference-guides/pipelines/v2.0.x.md} (97%) rename versioned_docs/version-2.0-2.4/{cluster-admin/tools/tools.md => 
reference-guides/rancher-cluster-tools.md} (72%) rename versioned_docs/version-2.0-2.4/{overview/architecture-recommendations => reference-guides/rancher-manager-architecture}/architecture-recommendations.md (84%) create mode 100644 versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md create mode 100644 versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md rename versioned_docs/version-2.0-2.4/{project-admin/tools/project-alerts => reference-guides/rancher-project-tools}/project-alerts.md (85%) rename versioned_docs/version-2.0-2.4/{project-admin/tools/project-logging => reference-guides/rancher-project-tools}/project-logging.md (80%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.1/hardening-2.1/hardening-2.1.md => reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md} (99%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.1/benchmark-2.1/benchmark-2.1.md => reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md} (100%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.2/hardening-2.2/hardening-2.2.md => reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md} (99%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.2/benchmark-2.2/benchmark-2.2.md => reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md} (100%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/hardening-2.3.md => reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md} (99%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/benchmark-2.3.md => 
reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md} (100%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/hardening-2.3.3.md => reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md} (99%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/benchmark-2.3.3.md => reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md} (100%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/hardening-2.3.5.md => reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md} (99%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/benchmark-2.3.5.md => reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md} (100%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.4/hardening-2.4/hardening-2.4.md => reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md} (99%) rename versioned_docs/version-2.0-2.4/{security/rancher-2.4/benchmark-2.4/benchmark-2.4.md => reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md} (100%) rename versioned_docs/version-2.0-2.4/{security/cve/cve.md => reference-guides/rancher-security/security-advisories-and-cves.md} (98%) rename versioned_docs/version-2.0-2.4/{admin-settings/rke-templates/example-yaml/example-yaml.md => reference-guides/rke1-template-example-yaml.md} (100%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/single-node-docker/advanced/advanced.md => 
reference-guides/single-node-rancher-in-docker/advanced-options.md} (91%) rename versioned_docs/version-2.0-2.4/{installation/other-installation-methods/single-node-docker/proxy/proxy.md => reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md} (94%) rename versioned_docs/version-2.0-2.4/{system-tools => reference-guides}/system-tools.md (83%) rename versioned_docs/version-2.0-2.4/{user-settings/api-keys => reference-guides/user-settings}/api-keys.md (92%) rename versioned_docs/version-2.0-2.4/{user-settings/cloud-credentials/cloud-credentials.md => reference-guides/user-settings/manage-cloud-credentials.md} (70%) rename versioned_docs/version-2.0-2.4/{user-settings/node-templates/node-templates.md => reference-guides/user-settings/manage-node-templates.md} (59%) rename versioned_docs/version-2.0-2.4/{user-settings/preferences/preferences.md => reference-guides/user-settings/user-preferences.md} (100%) rename versioned_docs/version-2.0-2.4/{v1.6-migration/run-migration-tool/migration-tools-ref/migration-tools-ref.md => reference-guides/v1.6-migration/migration-tools-cli-reference.md} (100%) rename versioned_docs/version-2.0-2.4/{troubleshooting => }/troubleshooting.md (51%) delete mode 100644 versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/kubernetes-components.md rename versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/{controlplane/controlplane.md => troubleshooting-controlplane-nodes.md} (91%) rename versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/{etcd/etcd.md => troubleshooting-etcd-nodes.md} (100%) rename versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/{nginx-proxy/nginx-proxy.md => troubleshooting-nginx-proxy.md} (100%) rename versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/{worker-and-generic/worker-and-generic.md => troubleshooting-worker-nodes-and-generic-components.md} (100%) rename 
versioned_docs/version-2.0-2.4/troubleshooting/{dns => other-troubleshooting-tips}/dns.md (92%) rename versioned_docs/version-2.0-2.4/troubleshooting/{kubernetes-resources => other-troubleshooting-tips}/kubernetes-resources.md (97%) rename versioned_docs/version-2.0-2.4/troubleshooting/{logging => other-troubleshooting-tips}/logging.md (100%) rename versioned_docs/version-2.0-2.4/troubleshooting/{networking => other-troubleshooting-tips}/networking.md (90%) rename versioned_docs/version-2.0-2.4/troubleshooting/{rancherha/rancherha.md => other-troubleshooting-tips/rancher-ha.md} (100%) rename versioned_docs/version-2.0-2.4/troubleshooting/{imported-clusters/imported-clusters.md => other-troubleshooting-tips/registered-clusters.md} (100%) delete mode 100644 versioned_docs/version-2.0-2.4/user-settings/user-settings.md diff --git a/versioned_docs/version-2.0-2.4/admin-settings/drivers/drivers.md b/versioned_docs/version-2.0-2.4/admin-settings/drivers/drivers.md deleted file mode 100644 index 2ae3ad49450..00000000000 --- a/versioned_docs/version-2.0-2.4/admin-settings/drivers/drivers.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Provisioning Drivers -weight: 1140 ---- - -Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -### Rancher Drivers - -With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. 
- -There are two types of drivers within Rancher: - -* [Cluster Drivers](#cluster-drivers) -* [Node Drivers](#node-drivers) - -### Cluster Drivers - -_Available as of v2.2.0_ - -Cluster drivers are used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. - -By default, Rancher has activated several hosted Kubernetes cloud providers including: - -* [Amazon EKS]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) -* [Google GKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) -* [Azure AKS]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) - -There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: - -* [Alibaba ACK]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) -* [Huawei CCE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) -* [Tencent]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) - -### Node Drivers - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. 
By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. - -Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: - -* [Amazon EC2]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/) -* [Azure]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/) -* [Digital Ocean]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) -* [vSphere]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) diff --git a/versioned_docs/version-2.0-2.4/backups/backup/backup.md b/versioned_docs/version-2.0-2.4/backups/backup/backup.md index d74a41ca001..98597995706 100644 --- a/versioned_docs/version-2.0-2.4/backups/backup/backup.md +++ b/versioned_docs/version-2.0-2.4/backups/backup/backup.md @@ -13,10 +13,10 @@ aliases: This section contains information about how to create backups of your Rancher data and how to restore them in a disaster scenario. 
- Rancher server backups: - - [Rancher installed on a K3s Kubernetes cluster](./k3s-backups) - - [Rancher installed on an RKE Kubernetes cluster](./rke-backups) - - [Rancher installed with Docker](./docker-backups) + - [Rancher installed on a K3s Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md) + - [Rancher installed on an RKE Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) + - [Rancher installed with Docker](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) -For information on backing up Rancher launched Kubernetes clusters, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/) +For information on backing up Rancher launched Kubernetes clusters, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md) -If you are looking to back up your [Rancher launched Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), please refer [here]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/). +If you are looking to back up your [Rancher launched Kubernetes cluster](../../pages-for-subheaders/launch-kubernetes-with-rancher.md), please refer [here](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md). diff --git a/versioned_docs/version-2.0-2.4/backups/restore/restore.md b/versioned_docs/version-2.0-2.4/backups/restore/restore.md index bb6569b5e89..98211eedc30 100644 --- a/versioned_docs/version-2.0-2.4/backups/restore/restore.md +++ b/versioned_docs/version-2.0-2.4/backups/restore/restore.md @@ -9,8 +9,8 @@ aliases: --- If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location. 
-- [Restoring backups for Rancher installed with Docker](./docker-restores) -- [Restoring backups for Rancher installed on an RKE Kubernetes cluster](./rke-restore) -- [Restoring backups for Rancher installed on a K3s Kubernetes cluster](./k3s-restore) +- [Restoring backups for Rancher installed with Docker](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md) +- [Restoring backups for Rancher installed on an RKE Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md) +- [Restoring backups for Rancher installed on a K3s Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md) -If you are looking to restore your [Rancher launched Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), please refer to [this section]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/). +If you are looking to restore your [Rancher launched Kubernetes cluster](../../pages-for-subheaders/launch-kubernetes-with-rancher.md), please refer to [this section](../../how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md). diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/setup.md b/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/setup.md deleted file mode 100644 index 3f44270f3a7..00000000000 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/setup.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Setup Guide -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup - - /rancher/v2.0-v2.4/en/istio/legacy/setup - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/ ---- - -This section describes how to enable Istio and start using it in your projects. 
- -This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. - -If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. - -> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management) - -1. [Enable Istio in the cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster) -1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) -1. [Select the nodes where the main Istio components will be deployed.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors) -1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads) -1. [Set up the Istio gateway. ]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway) -1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management) -1. 
[Generate traffic and see Istio in action.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/#view-traffic) - diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-capabilities-table/index.md b/versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-capabilities-table/index.md index 1b6e653f609..676346ba3a6 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-capabilities-table/index.md +++ b/versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-capabilities-table/index.md @@ -1,20 +1,20 @@ -| Action | [Rancher launched Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters) | +| Action | [Rancher launched Kubernetes Clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) | [Hosted Kubernetes Clusters](../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) | [Imported Clusters](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) | | --- | --- | ---| ---| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) | ✓ | ✓ | * | -| [Managing Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | -| [Using App 
Catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/) | ✓ | ✓ | ✓ | -| [Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) | ✓ | ✓ | ✓ | -| [Cloning Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cloning-clusters/)| ✓ | ✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/) | ✓ | | | -| [Ability to back up your Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/) | ✓ | | | -| [Ability to recover and restore etcd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/) | ✓ | | | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy/) | ✓ | | | -| [Running Security Scans]({{}}/rancher/v2.0-v2.4/en/security/security-scan/) | ✓ | | | -| [Authorized Cluster Endpoint]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) | ✓ | | | +| [Using kubectl and a kubeconfig file to Access a Cluster](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) | ✓ | ✓ | ✓ | +| [Managing Cluster Members](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) | ✓ | ✓ | ✓ | +| [Editing and Upgrading Clusters](../../pages-for-subheaders/cluster-configuration.md) | ✓ | ✓ | * | +| [Managing Nodes](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) | ✓ | ✓ | ✓ | +| [Managing Persistent Volumes and Storage Classes](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) | ✓ | ✓ | ✓ | +| [Managing Projects, Namespaces and Workloads](../../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) | ✓ | ✓ | ✓ | +| [Using App Catalogs](catalog/) | ✓ | ✓ | ✓ | +| 
[Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)](../../reference-guides/rancher-cluster-tools.md) | ✓ | ✓ | ✓ | +| [Cloning Clusters](../../how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md)| ✓ | ✓ | | +| [Ability to rotate certificates](../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md) | ✓ | | | +| [Ability to back up your Kubernetes Clusters](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md) | ✓ | | | +| [Ability to recover and restore etcd](../../how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md) | ✓ | | | +| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher](../../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) | ✓ | | | +| [Configuring Pod Security Policies](../../how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md) | ✓ | | | +| [Running Security Scans](security/security-scan/) | ✓ | | | +| [Authorized Cluster Endpoint](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#authorized-cluster-endpoint) | ✓ | | | \* Cluster configuration options can't be edited for imported clusters, except for K3s clusters. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/vsphere-node-template-config.md b/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/vsphere-node-template-config.md deleted file mode 100644 index 665733f833d..00000000000 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/vsphere-node-template-config.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: VSphere Node Template Configuration -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference - - /rancher/v2.0-v2.4/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids ---- - -The vSphere node templates in Rancher were updated in the following Rancher versions. Refer to the newest configuration reference that is less than or equal to your Rancher version: - -- [v2.3.3](./v2.3.3) -- [v2.3.0](./v2.3.0) -- [v2.2.0](./v2.2.0) -- [v2.0.4](./v2.0.4) - -For Rancher versions before v2.0.4, refer to [this version.](./prior-to-2.0.4) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/contributing/contributing.md b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md similarity index 99% rename from versioned_docs/version-2.0-2.4/contributing/contributing.md rename to versioned_docs/version-2.0-2.4/contribute-to-rancher.md index bc01f70c5e5..da4e11a73ac 100644 --- a/versioned_docs/version-2.0-2.4/contributing/contributing.md +++ b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md @@ -38,7 +38,7 @@ loglevel repository | https://github.com/rancher/loglevel | This repository is t To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
-![Rancher diagram]({{}}/img/rancher/ranchercomponentsdiagram.svg)
+![Rancher diagram](/img/ranchercomponentsdiagram.svg)
Rancher components used for provisioning/managing Kubernetes clusters. # Building diff --git a/versioned_docs/version-2.0-2.4/explanations.md b/versioned_docs/version-2.0-2.4/explanations.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cis-scans/skipped-tests/skipped-tests.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cis-scans/skipped-tests/skipped-tests.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-alerts/default-alerts/default-alerts.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-alerts/default-alerts.md similarity index 91% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-alerts/default-alerts/default-alerts.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-alerts/default-alerts.md index 0ae49c9a737..fd5baee6931 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-alerts/default-alerts/default-alerts.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-alerts/default-alerts.md @@ -7,9 +7,9 @@ aliases: - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts --- -When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) for them. +When you create a cluster, some alert rules are predefined. 
These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier](../notifiers.md) for them. -Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). +Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions](monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). # Alerts for etcd Etcd is the key-value store that contains the state of the Kubernetes cluster. Rancher provides default alerts if the built-in monitoring detects a potential problem with etcd. You don't have to enable monitoring to receive these alerts. @@ -57,4 +57,4 @@ Alerts can be triggered based on node metrics. Each computing resource in a Kube | Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | # Project-level Alerts -When you enable monitoring for the project, some project-level alerts are provided. For details, refer to the [section on project-level alerts.]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/alerts/) +When you enable monitoring for the project, some project-level alerts are provided. 
For details, refer to the [section on project-level alerts.](project-admin/tools/alerts/) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/elasticsearch/elasticsearch.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/elasticsearch.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/elasticsearch/elasticsearch.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/elasticsearch.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/fluentd/fluentd.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/fluentd.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/fluentd/fluentd.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/fluentd.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/kafka/kafka.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/kafka.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/kafka/kafka.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/kafka.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/splunk/splunk.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/splunk.md similarity index 96% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/splunk/splunk.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/splunk.md index f5510ee8222..ea64c24e5fa 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/splunk/splunk.md +++ 
b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/splunk.md @@ -60,10 +60,10 @@ If your instance of Splunk uses SSL, your **Endpoint** will need to begin with ` 1. Click on **Search & Reporting**. The number of **Indexed Events** listed should be increasing. 1. Click on Data Summary and select the Sources tab. - ![View Logs]({{}}/img/rancher/splunk/splunk4.jpg) + ![View Logs](/img/splunk/splunk4.jpg) 1. To view the actual logs, click on the source that you declared earlier. - ![View Logs]({{}}/img/rancher/splunk/splunk5.jpg) + ![View Logs](/img/splunk/splunk5.jpg) ## Troubleshooting diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/syslog/syslog.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/syslog.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/syslog/syslog.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/syslog.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/cluster-metrics/cluster-metrics.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics.md similarity index 84% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/cluster-metrics/cluster-metrics.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics.md index c6ea196adac..b737c93065a 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/cluster-metrics/cluster-metrics.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics.md @@ -41,11 +41,11 @@ Some of the biggest metrics to look out for: 1. Click on **Node Metrics**. 
-[_Get expressions for Cluster Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#cluster-metrics) +[_Get expressions for Cluster Metrics_](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#cluster-metrics) ### Etcd Metrics ->**Note:** Only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). +>**Note:** Only supported for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). Etcd metrics display the operations of the etcd database on each of your cluster nodes. After establishing a baseline of normal etcd operational metrics, observe them for abnormal deltas between metric refreshes, which indicate potential issues with etcd. Always address etcd issues immediately! @@ -61,13 +61,13 @@ Some of the biggest metrics to look out for: If this statistic suddenly grows, it usually indicates network communication issues that constantly force the cluster to elect a new leader. -[_Get expressions for Etcd Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#etcd-metrics) +[_Get expressions for Etcd Metrics_](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#etcd-metrics) ### Kubernetes Components Metrics Kubernetes components metrics display data about the cluster's individual Kubernetes components. Primarily, it displays information about connections and latency for each component: the API server, controller manager, scheduler, and ingress controller. ->**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). 
+>**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). When analyzing Kubernetes component metrics, don't be concerned about any single standalone metric in the charts and graphs that display. Rather, you should establish a baseline for metrics considered normal following a period of observation, e.g. the range of values that your components usually operate within and are considered normal. After you establish this baseline, be on the lookout for large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. @@ -93,13 +93,13 @@ Some of the more important component metrics to monitor are: How fast ingress is routing connections to your cluster services. -[_Get expressions for Kubernetes Component Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/#kubernetes-components-metrics) +[_Get expressions for Kubernetes Component Metrics_](monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/#kubernetes-components-metrics) ## Rancher Logging Metrics -Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/). +Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service](cluster-admin/tools/logging/). 
-[_Get expressions for Rancher Logging Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#rancher-logging-metrics) +[_Get expressions for Rancher Logging Metrics_](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#rancher-logging-metrics) ## Finding Workload Metrics @@ -116,4 +116,4 @@ Workload metrics display the hardware utilization for a Kubernetes workload. You - **View the Pod Metrics:** Click on **Pod Metrics**. - **View the Container Metrics:** In the **Containers** section, select a specific container and click on its name. Click on **Container Metrics**. -[_Get expressions for Workload Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) +[_Get expressions for Workload Metrics_](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/custom-metrics/custom-metrics.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/custom-metrics.md similarity index 98% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/custom-metrics/custom-metrics.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/custom-metrics.md index f710ae39b76..d969b8c0af5 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/custom-metrics/custom-metrics.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/custom-metrics.md @@ -9,7 +9,7 @@ aliases: - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/ --- -After you've enabled [cluster level monitoring]({{< baseurl >}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/), You can view the metrics data from Rancher. 
You can also deploy the Prometheus custom metrics adapter then you can use the HPA with metrics stored in cluster monitoring. +After you've enabled [cluster level monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/), You can view the metrics data from Rancher. You can also deploy the Prometheus custom metrics adapter then you can use the HPA with metrics stored in cluster monitoring. ## Deploy Prometheus Custom Metrics Adapter diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/expression/expression.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/expression.md similarity index 99% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/expression/expression.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/expression.md index 9109666fa00..163a0041dc0 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/expression/expression.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/expression.md @@ -9,9 +9,9 @@ aliases: - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/ --- -The PromQL expressions in this doc can be used to configure [alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) +The PromQL expressions in this doc can be used to configure [alerts.](cluster-admin/tools/alerts/) -> Before expressions can be used in alerts, monitoring must be enabled. For more information, refer to the documentation on enabling monitoring [at the cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [at the project level.]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/) +> Before expressions can be used in alerts, monitoring must be enabled. 
For more information, refer to the documentation on enabling monitoring [at the cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [at the project level.](project-admin/tools/monitoring/) For more information about querying Prometheus, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/project-monitoring/project-monitoring.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/project-monitoring.md similarity index 69% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/project-monitoring/project-monitoring.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/project-monitoring.md index 94e8203c58b..ee3409ed341 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/project-monitoring/project-monitoring.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/project-monitoring.md @@ -21,9 +21,9 @@ This section covers the following topics: ### Monitoring Scope -Using Prometheus, you can monitor Rancher at both the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. +Using Prometheus, you can monitor Rancher at both the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. -- [Cluster monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) allows you to view the health of your Kubernetes cluster. 
Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. +- [Cluster monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. - Kubernetes control plane - etcd database @@ -33,17 +33,17 @@ Using Prometheus, you can monitor Rancher at both the [cluster level]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. +Only [administrators](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. ### Enabling Project Monitoring -> **Prerequisite:** Cluster monitoring must be [enabled.]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) +> **Prerequisite:** Cluster monitoring must be [enabled.](monitoring-alerting/legacy/monitoring/cluster-monitoring/) 1. Go to the project where monitoring should be enabled. Note: When cluster monitoring is enabled, monitoring is also enabled by default in the **System** project. 1. 
Select **Tools > Monitoring** in the navigation bar. -1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Enter in your desired configuration options. +1. Select **Enable** to show the [Prometheus configuration options](monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Enter in your desired configuration options. 1. Click **Save**. @@ -55,12 +55,12 @@ Prometheus|750m| 750Mi | 1000m | 1000Mi | Yes Grafana | 100m | 100Mi | 200m | 200Mi | No -**Result:** A single application,`project-monitoring`, is added as an [application]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) to the project. After the application is `active`, you can start viewing project metrics through the [Rancher dashboard]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or directly from Grafana. +**Result:** A single application,`project-monitoring`, is added as an [application](catalog/apps/) to the project. After the application is `active`, you can start viewing project metrics through the [Rancher dashboard](monitoring-alerting/legacy/monitoring/cluster-monitoring/) or directly from Grafana. > The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. 
### Project Metrics -[Workload metrics]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and at the [project level.](#enabling-project-monitoring) +[Workload metrics](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) and at the [project level.](#enabling-project-monitoring) You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/prometheus/prometheus.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/prometheus.md similarity index 92% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/prometheus/prometheus.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/prometheus.md index 01c490eb02f..d103484b7ee 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/prometheus/prometheus.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/prometheus.md @@ -11,7 +11,7 @@ aliases: _Available as of v2.2.0_ -While configuring monitoring at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/), there are multiple options that can be configured. 
+While configuring monitoring at either the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level](project-admin/tools/monitoring/), there are multiple options that can be configured. - [Basic Configuration](#basic-configuration) - [Advanced Options](#advanced-options) @@ -36,7 +36,7 @@ Selector | Ability to select the nodes in which Prometheus and Grafana pods are # Advanced Options -Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog]({{}}/rancher/v2.0-v2.4/en/catalog/), it can be configured like any other catalog application, by passing in values to Helm. +Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog](catalog/), it can be configured like any other catalog application, by passing in values to Helm. > **Warning:** Any modification to the application without understanding the entire application can lead to catastrophic errors. @@ -81,7 +81,7 @@ When configuring Prometheus and enabling the node exporter, enter a host port in # Persistent Storage ->**Prerequisite:** Configure one or more StorageClasses to use as [persistent storage]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) for your Prometheus or Grafana pod. +>**Prerequisite:** Configure one or more StorageClasses to use as [persistent storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) for your Prometheus or Grafana pod. By default, when you enable Prometheus for either a cluster or project, all monitoring data that Prometheus collects is stored on its own pod. With local storage, if the Prometheus or Grafana pods fail, all the data is lost. Rancher recommends configuring an external persistent storage to the cluster. 
With the external persistent storage, if the Prometheus or Grafana pods fail, the new pods can recover using data from the persistent storage. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/viewing-metrics/viewing-metrics.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md similarity index 82% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/viewing-metrics/viewing-metrics.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md index f3748f37e39..74cfcae50da 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/viewing-metrics/viewing-metrics.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md @@ -11,11 +11,11 @@ aliases: _Available as of v2.2.0_ -After you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/), you will want to be start viewing the data being collected. There are multiple ways to view this data. +After you've enabled monitoring at either the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level](project-admin/tools/monitoring/), you will want to be start viewing the data being collected. There are multiple ways to view this data. ## Rancher Dashboard ->**Note:** This is only available if you've enabled monitoring at the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/). Project specific analytics must be viewed using the project's Grafana instance. +>**Note:** This is only available if you've enabled monitoring at the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/). 
Project specific analytics must be viewed using the project's Grafana instance. Rancher's dashboards are available at multiple locations: @@ -39,13 +39,13 @@ When analyzing these metrics, don't be concerned about any single standalone met ## Grafana -If you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. +If you've enabled monitoring at either the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level](project-admin/tools/monitoring/), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. Grafana allows you to query, visualize, alert, and ultimately, understand your cluster and workload data. For more information on Grafana and its capabilities, visit the [Grafana website](https://grafana.com/grafana). ### Authentication -Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. +Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md). In other words, a user's access in Grafana mirrors their access in Rancher. When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. 
If you log out and log in again, you will be prompted to change your password. You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/resources/resources.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/resources/resources.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/disabling-istio/disabling-istio.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/disable-istio.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/disabling-istio/disabling-istio.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/disable-istio.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/rbac/rbac.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/rbac-for-istio.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/rbac/rbac.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/rbac-for-istio.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/release-notes/release-notes.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/release-notes.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/release-notes/release-notes.md rename to 
versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/release-notes.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/notifiers/notifiers.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md similarity index 95% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/notifiers/notifiers.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md index 1d4887810e2..f1c725449e5 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/notifiers/notifiers.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md @@ -203,5 +203,5 @@ After you set up notifiers, you can manage them. From the **Global** view, open After creating a notifier, set up alerts to receive notifications of Rancher system events. -- [Cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can set up alerts at the [cluster level]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/). -- [Project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can set up alerts at the [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/alerts/). +- [Cluster owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) can set up alerts at the [cluster level](cluster-admin/tools/alerts/). +- [Project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can set up alerts at the [project level](project-admin/tools/alerts/). 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/opa-gatekeeper/opa-gatekeeper.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/opa-gatekeeper.md similarity index 98% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/opa-gatekeeper/opa-gatekeeper.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/opa-gatekeeper.md index 4161da3c3c4..e8347f6f87e 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/opa-gatekeeper/opa-gatekeeper.md +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/opa-gatekeeper.md @@ -32,7 +32,7 @@ OPA Gatekeeper is made available via Rancher's Helm system chart, and it is inst > **Prerequisites:** > > - Only administrators and cluster owners can enable OPA Gatekeeper. -> - The dashboard needs to be enabled using the `dashboard` feature flag. For more information, refer to the [section on enabling experimental features.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) +> - The dashboard needs to be enabled using the `dashboard` feature flag. For more information, refer to the [section on enabling experimental features.](installation/options/feature-flags/) 1. Navigate to the cluster's **Dashboard** view. 1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** diff --git a/versioned_docs/version-2.0-2.4/faq/faq.md b/versioned_docs/version-2.0-2.4/faq.md similarity index 88% rename from versioned_docs/version-2.0-2.4/faq/faq.md rename to versioned_docs/version-2.0-2.4/faq.md index 3c43e002b4a..2c580cdfba5 100644 --- a/versioned_docs/version-2.0-2.4/faq/faq.md +++ b/versioned_docs/version-2.0-2.4/faq.md @@ -7,7 +7,7 @@ aliases: This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. -See [Technical FAQ]({{}}/rancher/v2.0-v2.4/en/faq/technical/), for frequently asked technical questions. 
+See [Technical FAQ](faq/technical-items.md), for frequently asked technical questions.
@@ -25,13 +25,13 @@ Yes. **Does Rancher support Windows?** -As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/) +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.](pages-for-subheaders/use-windows-clusters.md)
**Does Rancher support Istio?** -As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/) +As of Rancher 2.3.0, we support [Istio.](pages-for-subheaders/istio.md) Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/) diff --git a/versioned_docs/version-2.0-2.4/faq/networking/cni-providers/cni-providers.md b/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md similarity index 85% rename from versioned_docs/version-2.0-2.4/faq/networking/cni-providers/cni-providers.md rename to versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md index a1fbf15e9f4..4eb3d0b5626 100644 --- a/versioned_docs/version-2.0-2.4/faq/networking/cni-providers/cni-providers.md +++ b/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md @@ -10,7 +10,7 @@ CNI (Container Network Interface), a [Cloud Native Computing Foundation project] Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. -![CNI Logo]({{}}/img/rancher/cni-logo.png) +![CNI Logo](/img/cni-logo.png) For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). @@ -28,7 +28,7 @@ This network model is used when an extended L2 bridge is preferred. This network CNI network providers using this network model include Flannel, Canal, and Weave. -![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) +![Encapsulated Network](/img/encapsulated-network.png) #### What is an Unencapsulated Network? @@ -40,7 +40,7 @@ This network model is used when a routed L3 network is preferred. This mode dyna CNI network providers using this network model include Calico and Romana. 
-![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) +![Unencapsulated Network](/img/unencapsulated-network.png) ### What CNI Providers are Provided by Rancher? @@ -48,43 +48,43 @@ Out-of-the-box, Rancher provides the following CNI network providers for Kuberne #### Canal -![Canal Logo]({{}}/img/rancher/canal-logo.png) +![Canal Logo](/img/canal-logo.png) Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/) +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) -{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} +![](/img/canal-diagram.png) For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) #### Flannel -![Flannel Logo]({{}}/img/rancher/flannel-logo.png) +![Flannel Logo](/img/flannel-logo.png) Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. 
Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. -![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) +![Flannel Diagram](/img/flannel-diagram.png) For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). #### Calico -![Calico Logo]({{}}/img/rancher/calico-logo.png) +![Calico Logo](/img/calico-logo.png) Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. 
Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. -Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. +Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. -![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) +![Calico Diagram](/img/calico-diagram.svg) For more information, see the following pages: @@ -94,13 +94,13 @@ For more information, see the following pages: #### Weave -![Weave Logo]({{}}/img/rancher/weave-logo.png) +![Weave Logo](/img/weave-logo.png) _Available as of v2.2.0_ Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. For more information, see the following pages: @@ -151,4 +151,4 @@ As of Rancher v2.0.7, Canal is the default CNI network provider. We recommend it ### How can I configure a CNI network provider? 
-Please see [Cluster Options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). +Please see [Cluster Options](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) and the options for [Network Plug-ins](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/versioned_docs/version-2.0-2.4/faq/kubectl/kubectl.md b/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md similarity index 100% rename from versioned_docs/version-2.0-2.4/faq/kubectl/kubectl.md rename to versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md diff --git a/versioned_docs/version-2.0-2.4/faq/networking.md b/versioned_docs/version-2.0-2.4/faq/networking.md new file mode 100644 index 00000000000..580786492ce --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/networking.md @@ -0,0 +1,9 @@ +--- +title: Networking +weight: 8005 +--- + +Networking FAQ's + +- [CNI Providers](container-network-interface-providers.md) + diff --git a/versioned_docs/version-2.0-2.4/faq/networking/networking.md b/versioned_docs/version-2.0-2.4/faq/networking/networking.md deleted file mode 100644 index 9551f35a3a9..00000000000 --- a/versioned_docs/version-2.0-2.4/faq/networking/networking.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Networking -weight: 8005 ---- - 
-Networking FAQ's - -- [CNI Providers]({{}}/rancher/v2.0-v2.4/en/faq/networking/cni-providers/) - diff --git a/versioned_docs/version-2.0-2.4/faq/removing-rancher/removing-rancher.md b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md similarity index 73% rename from versioned_docs/version-2.0-2.4/faq/removing-rancher/removing-rancher.md rename to versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md index f3b448c2498..d2180ad5d71 100644 --- a/versioned_docs/version-2.0-2.4/faq/removing-rancher/removing-rancher.md +++ b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md @@ -26,11 +26,11 @@ The capability to access a downstream cluster without Rancher depends on the typ - **Imported clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. - **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. -- **RKE clusters:** To access an [RKE cluster,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) the cluster must have the [authorized cluster endpoint]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) 
With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. +- **RKE clusters:** To access an [RKE cluster,](../pages-for-subheaders/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../pages-for-subheaders/rancher-manager-architecture.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. ### What if I don't want Rancher anymore? -If you [installed Rancher on a Kubernetes cluster,]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) remove Rancher by using the [System Tools]({{}}/rancher/v2.0-v2.4/en/system-tools/) with the `remove` subcommand. 
+If you [installed Rancher on a Kubernetes cluster,](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) remove Rancher by using the [System Tools](../reference-guides/system-tools.md) with the `remove` subcommand. If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. diff --git a/versioned_docs/version-2.0-2.4/faq/security/security.md b/versioned_docs/version-2.0-2.4/faq/security.md similarity index 60% rename from versioned_docs/version-2.0-2.4/faq/security/security.md rename to versioned_docs/version-2.0-2.4/faq/security.md index 0e826a53a04..aee42e0fb93 100644 --- a/versioned_docs/version-2.0-2.4/faq/security/security.md +++ b/versioned_docs/version-2.0-2.4/faq/security.md @@ -6,10 +6,10 @@ weight: 8007 **Is there a Hardening Guide?** -The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.0-v2.4/en/security/) section. +The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section.
**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.0-v2.4/en/security/) section. +We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.0-2.4/faq/technical/technical.md b/versioned_docs/version-2.0-2.4/faq/technical-items.md similarity index 87% rename from versioned_docs/version-2.0-2.4/faq/technical/technical.md rename to versioned_docs/version-2.0-2.4/faq/technical-items.md index 61d8de21421..bfc57d54224 100644 --- a/versioned_docs/version-2.0-2.4/faq/technical/technical.md +++ b/versioned_docs/version-2.0-2.4/faq/technical-items.md @@ -22,7 +22,7 @@ New password for default administrator (user-xxxxx): > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. Kubernetes install (RKE add-on): ``` @@ -51,7 +51,7 @@ New password for default administrator (user-xxxxx): > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. Kubernetes install (RKE add-on): ``` @@ -63,7 +63,7 @@ New password for default admin user (user-xxxxx): ### How can I enable debug logging? -See [Troubleshooting: Logging]({{}}/rancher/v2.0-v2.4/en/troubleshooting/logging/) +See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) ### My ClusterIP does not respond to ping @@ -75,7 +75,7 @@ Node Templates can be accessed by opening your account menu (top right) and sele ### Why is my Layer-4 Load Balancer in `Pending` state? -The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) +The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) ### Where is the state of Rancher stored? @@ -90,7 +90,7 @@ We follow the validated Docker versions for upstream Kubernetes releases. The va SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. 
Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. -![Download Keys]({{}}/img/rancher/downloadsshkeys.png) +![Download Keys](/img/downloadsshkeys.png) Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) @@ -109,13 +109,13 @@ The UI consists of static files, and works based on responses of the API. That m A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. -When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{}}/rancher/v2.0-v2.4/en/faq/cleaning-cluster-nodes/) to clean the node. +When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes](faq/cleaning-cluster-nodes/) to clean the node. When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. ### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? -You can add additional arguments/binds/environment variables via the [Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) option in Cluster Options. 
For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{}}/rke/latest/en/example-yamls/). +You can add additional arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). ### How do I check if my certificate chain is valid? diff --git a/versioned_docs/version-2.0-2.4/faq/telemetry/telemetry.md b/versioned_docs/version-2.0-2.4/faq/telemetry.md similarity index 100% rename from versioned_docs/version-2.0-2.4/faq/telemetry/telemetry.md rename to versioned_docs/version-2.0-2.4/faq/telemetry.md diff --git a/versioned_docs/version-2.0-2.4/faq/upgrades-to-2x/upgrades-to-2x.md b/versioned_docs/version-2.0-2.4/faq/upgrades-to-2x.md similarity index 100% rename from versioned_docs/version-2.0-2.4/faq/upgrades-to-2x/upgrades-to-2x.md rename to versioned_docs/version-2.0-2.4/faq/upgrades-to-2x.md diff --git a/versioned_docs/version-2.0-2.4/getting-started.md b/versioned_docs/version-2.0-2.4/getting-started.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/install-rancher/install-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher.md similarity index 90% rename from 
versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/install-rancher/install-rancher.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher.md index 5eb8525e182..78134ce0898 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/install-rancher/install-rancher.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher.md @@ -33,12 +33,12 @@ This section describes installing Rancher in five parts: From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. -1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. ```plain helm init -c ``` -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/). +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../../../resources/choose-a-rancher-version.md). 
{{< release-channel >}} ``` helm repo add rancher- https://releases.rancher.com/server-charts/ @@ -49,7 +49,7 @@ From a system that has access to the internet, fetch the latest Helm chart and c helm fetch rancher-/rancher ``` -> Want additional options? See the Rancher [Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options). +> Want additional options? See the Rancher [Helm chart options](../../../../../reference-guides/installation-references/helm-chart-options.md). ### B. Choose your SSL Configuration @@ -57,7 +57,7 @@ Rancher Server is designed to be secure by default and requires SSL/TLS configur When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#external-tls-termination). +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](installation/options/chart-options/#external-tls-termination). | Configuration | Chart option | Description | Requires cert-manager | | ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | @@ -82,7 +82,7 @@ Based on the choice your made in [B. Choose your SSL Configuration](#b-choose-yo By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. > **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). 
+> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](installation/options/upgrading-cert-manager/). 1. From a system connected to the internet, add the cert-manager repo to Helm. ```plain @@ -170,7 +170,7 @@ If you are using a Private CA signed cert, add `--set privateCA=true` following --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts ``` -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. +Then refer to [Adding TLS Secrets](../../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them. @@ -218,15 +218,15 @@ kubectl -n cattle-system apply -R -f ./rancher ### E. For Rancher versions before v2.3.0, Configure System Charts -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts/). +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](installation/options/local-system-charts/). 
### Additional Resources These resources could be helpful when installing Rancher: -- [Rancher Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) +- [Rancher Helm chart options](installation/options/chart-options/) +- [Adding TLS secrets](../../../resources/add-tls-secrets.md) +- [Troubleshooting Rancher Kubernetes Installations](installation/options/troubleshooting/) @@ -242,10 +242,10 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher > **Do you want to...** > -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#additional-trusted-cas). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](installation/options/chart-options/#additional-trusted-cas). +> - Record all transactions with the Rancher API? See [API Auditing](../../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log). -- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts/) +- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. 
Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.](installation/options/local-system-charts/) Choose from the following options: @@ -259,7 +259,7 @@ Log into your Linux host, and then run the installation command below. When ente | Placeholder | Description | | -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | | `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags/) that you want to install. | +| `` | The release tag of the [Rancher version](installation/options/server-tags/) that you want to install. | ``` docker run -d --restart=unless-stopped \ @@ -279,7 +279,7 @@ In development or testing environments where your team will access your Rancher > From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. > > - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) After creating your certificate, log into your Linux host, and then run the installation command below. 
When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. @@ -290,7 +290,7 @@ After creating your certificate, log into your Linux host, and then run the inst | `` | The path to the private key for your certificate. | | `` | The path to the certificate authority's certificate. | | `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags/) that you want to install. | +| `` | The release tag of the [Rancher version](installation/options/server-tags/) that you want to install. | ``` docker run -d --restart=unless-stopped \ @@ -319,7 +319,7 @@ After obtaining your certificate, log into your Linux host, and then run the ins | `` | The path to your full certificate chain. | | `` | The path to the private key for your certificate. | | `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags/) that you want to install. | +| `` | The release tag of the [Rancher version](installation/options/server-tags/) that you want to install. | > **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. @@ -338,7 +338,7 @@ docker run -d --restart=unless-stopped \ If you are installing Rancher v2.3.0+, the installation is complete. -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts/). +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. 
Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](installation/options/local-system-charts/). diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/launch-kubernetes/launch-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes.md similarity index 79% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/launch-kubernetes/launch-kubernetes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes.md index afac79574b6..af86a3bbd02 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/launch-kubernetes/launch-kubernetes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes.md @@ -9,7 +9,7 @@ aliases: This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment. -Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE]({{}}/rke/latest/en/installation/) and create a RKE config file. +Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE](https://rancher.com/docs/rke/latest/en/installation/) and create a RKE config file. - [A. Create an RKE Config File](#a-create-an-rke-config-file) - [B. 
Run RKE](#b-run-rke) @@ -19,9 +19,9 @@ Since a Kubernetes Installation requires a Kubernetes cluster, we will create a From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which is a configuration for the cluster you're deploying Rancher to. -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts) you created. +Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes](installation/air-gap-high-availability/provision-hosts) you created. -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). +> **Tip:** For more details on the options available, see the RKE [Config Options](https://rancher.com/docs/rke/latest/en/config-options/).
RKE Options
@@ -76,9 +76,9 @@ rke up --config ./rancher-cluster.yml Save a copy of the following files in a secure location: - `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. -### [Next: Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher) +### [Next: Install Rancher](../../../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/populate-private-registry/populate-private-registry.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry.md similarity index 92% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/populate-private-registry/populate-private-registry.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry.md index 57cf8d5c83a..8b82e241c3b 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/populate-private-registry/populate-private-registry.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry.md @@ -18,11 +18,11 @@ import TabItem from '@theme/TabItem'; > > **Note:** Populating the private registry with images is the same process for HA and Docker installations, the differences in this section is based on whether or not you are planning to provision a Windows cluster or not. -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. 
In an air gap installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. +By default, all images used to [provision Kubernetes clusters](../../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) or launch any [tools](../../../../../reference-guides/rancher-cluster-tools.md) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gap installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. -By default, we provide the steps of how to populate your private registry assuming you are provisioning Linux only clusters, but if you plan on provisioning any [Windows clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed for a Windows cluster. +By default, we provide the steps of how to populate your private registry assuming you are provisioning Linux only clusters, but if you plan on provisioning any [Windows clusters](../../../../../pages-for-subheaders/use-windows-clusters.md), there are separate instructions to support the images needed for a Windows cluster. @@ -58,7 +58,7 @@ In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). 
+ > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). ```plain helm repo add jetstack https://charts.jetstack.io @@ -224,7 +224,7 @@ The workstation must have Docker 18.02+ in order to support manifests, which are **For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). 
```plain helm repo add jetstack https://charts.jetstack.io helm repo update @@ -277,6 +277,6 @@ Move the images in the `rancher-images.tar.gz` to your private registry using th -### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) +### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE](../../../other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) -### [Next: Docker Installs - Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) +### [Next: Docker Installs - Install Rancher](../../../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/prepare-nodes/prepare-nodes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes.md similarity index 85% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/prepare-nodes/prepare-nodes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes.md index 8937f04dbcc..06f391422a1 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/prepare-nodes/prepare-nodes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes.md @@ -20,7 +20,7 @@ This section is about how to prepare your node(s) to install Rancher for your ai ### OS, Docker, Hardware, and Networking -Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your node(s) fulfill the general [installation 
requirements.](../../../../../pages-for-subheaders/installation-requirements.md) ### Private Registry @@ -33,8 +33,8 @@ If you need help with creating a private registry, please refer to the [Docker d The following CLI tools are required for the Kubernetes Install. Make sure these tools are installed on your workstation and available in your `$PATH`. - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +- [rke](https://rancher.com/docs/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. @@ -42,7 +42,7 @@ The following CLI tools are required for the Kubernetes Install. Make sure these ### OS, Docker, Hardware, and Networking -Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your node(s) fulfill the general [installation requirements.](../../../../../pages-for-subheaders/installation-requirements.md) ### Private Registry @@ -69,13 +69,13 @@ Rancher recommends installing Rancher on a Kubernetes cluster. A highly availabl
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
-![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) +![Rancher HA](/img/ha/rancher2ha.svg) ### A. Provision three air gapped Linux hosts according to our requirements These hosts will be disconnected from the internet, but require being able to connect with your private registry. -View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +View hardware and software requirements for each of your cluster nodes in [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). ### B. Set up your Load Balancer @@ -88,8 +88,8 @@ You will need to configure a load balancer as a basic Layer 4 TCP forwarder to d **Load Balancer Configuration Samples:** -- For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx) -- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb) +- For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx) +- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.](installation/options/nlb) @@ -104,9 +104,9 @@ Instead of running the Docker installation, you have the option to follow the Ku These hosts will be disconnected from the internet, but require being able to connect with your private registry. -View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +View hardware and software requirements for each of your cluster nodes in [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). 
-### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) +### [Next: Collect and Publish Images to your Private Registry](../../../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/3-node-certificate-recognizedca.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md similarity index 98% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/3-node-certificate-recognizedca.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md index c546bb51f1d..3af4b3fe53a 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/3-node-certificate-recognizedca.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md @@ -16,7 +16,7 @@ The following template can be used for the cluster.yml if you have a setup with: - Layer 4 load balancer - [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 
```yaml nodes: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-certificate/3-node-certificate.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md similarity index 98% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-certificate/3-node-certificate.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md index 9f7552a58eb..86c06424c78 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-certificate/3-node-certificate.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md @@ -15,7 +15,7 @@ The following template can be used for the cluster.yml if you have a setup with: - Layer 4 load balancer - [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 
```yaml nodes: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/3-node-externalssl-certificate.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md similarity index 98% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/3-node-externalssl-certificate.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md index 8b2e38ac17e..59cf796e1ed 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/3-node-externalssl-certificate.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md @@ -15,7 +15,7 @@ The following template can be used for the cluster.yml if you have a setup with: - Layer 7 load balancer with self-signed SSL termination (HTTPS) - [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 
```yaml nodes: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/3-node-externalssl-recognizedca.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md similarity index 98% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/3-node-externalssl-recognizedca.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md index ee5d81eaecb..57107fbcc4a 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/3-node-externalssl-recognizedca.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md @@ -15,7 +15,7 @@ The following template can be used for the cluster.yml if you have a setup with: - Layer 7 load balancer with SSL termination (HTTPS) - [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 
```yaml nodes: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md similarity index 87% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md index 3666bddd71d..e3ba6af7fc0 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md @@ -15,11 +15,11 @@ A layer-7 load balancer can be beneficial if you want to centralize your TLS ter This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. > **Want to skip the external load balancer?** -> See [Docker Installation]({{}}/rancher/v2.0-v2.4/en/installation/single-node) instead. +> See [Docker Installation](installation/single-node) instead. ## Requirements for OS, Docker, Hardware, and Networking -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your node fulfills the general [installation requirements.](../../../../pages-for-subheaders/installation-requirements.md) ## Installation Outline @@ -33,7 +33,7 @@ Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements) to launch your Rancher Server. 
+Provision a single Linux host according to our [Requirements](../../../../pages-for-subheaders/installation-requirements.md) to launch your Rancher Server. ## 2. Choose an SSL Option and Install Rancher @@ -169,20 +169,20 @@ http { ## What's Next? -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). +- **Recommended:** Review [Single Node Backup and Restore](installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md).
## FAQ and Troubleshooting -For help troubleshooting certificates, see [this section.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +For help troubleshooting certificates, see [this section.](../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) ## Advanced Options ### API Auditing -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) feature by adding the flags below into your install command. +If you want to record all transactions with the Rancher API, enable the [API Auditing](installation/api-auditing) feature by adding the flags below into your install command. -e AUDIT_LEVEL=1 \ -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ @@ -192,7 +192,7 @@ If you want to record all transactions with the Rancher API, enable the [API Aud ### Air Gap -If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. +If you are visiting this page to complete an [Air Gap Installation](installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. 
**Example:** diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/api-audit-log/api-audit-log.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md similarity index 98% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/api-audit-log/api-audit-log.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md index 4808459f8b8..e51ade64e10 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/api-audit-log/api-audit-log.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md @@ -14,9 +14,9 @@ You can enable API Auditing during Rancher installation or upgrade. The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. -- [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) +- [Docker Install](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) -- [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) +- [Kubernetes Install](../../../../reference-guides/installation-references/helm-chart-options.md#api-audit-log) ## API Audit Log Options @@ -70,7 +70,7 @@ kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log #### Shipping the Audit Log -You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging) for details. 
+You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging](cluster-admin/tools/logging) for details. ## Audit Log Samples diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/nginx/nginx.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx.md similarity index 96% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/nginx/nginx.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx.md index b81f5303496..1b1010bddaf 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/nginx/nginx.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx.md @@ -24,7 +24,7 @@ After installing NGINX, you need to update the NGINX configuration file, `nginx. 1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. -2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/). +2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes](installation/options/helm2/create-nodes-lb/). >**Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/nlb/nlb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb.md similarity index 90% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/nlb/nlb.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb.md index f75a1a2ef96..e7b5c1304a9 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/nlb/nlb.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb.md @@ -31,7 +31,7 @@ Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get st The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} +![](/img/ha/nlb/ec2-loadbalancing.png) Click **Create target group** to create the first target group, regarding TCP port 443. @@ -57,11 +57,11 @@ Success codes | `200-399` *** **Screenshot Target group TCP port 443 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} +![](/img/ha/nlb/create-targetgroup-443.png) *** **Screenshot Target group TCP port 443 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} +![](/img/ha/nlb/create-targetgroup-443-advanced.png) *** @@ -89,11 +89,11 @@ Success codes | `200-399` *** **Screenshot Target group TCP port 80 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} +![](/img/ha/nlb/create-targetgroup-80.png) *** **Screenshot Target group TCP port 80 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} +![](/img/ha/nlb/create-targetgroup-80-advanced.png) *** @@ -103,19 +103,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} +![](/img/ha/nlb/edit-targetgroup-443.png) Select the instances (Linux nodes) you want to add, and click **Add to registered**. *** **Screenshot Add targets to target group TCP port 443**
-{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} +![](/img/ha/nlb/add-targets-targetgroup-443.png) *** **Screenshot Added targets to target group TCP port 443**
-{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} +![](/img/ha/nlb/added-targets-targetgroup-443.png) When the instances are added, click **Save** on the bottom right of the screen. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-init/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting.md similarity index 85% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-init/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting.md index 789e01310f1..ec85db58ce9 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-init/troubleshooting/troubleshooting.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting.md @@ -23,4 +23,4 @@ helm version --server Error: could not find tiller ``` -When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) to install `tiller` with the correct `ServiceAccount`. +When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)](installation/options/helm2/helm-init/) to install `tiller` with the correct `ServiceAccount`. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/chart-options/chart-options.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options.md similarity index 91% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/chart-options/chart-options.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options.md index e13a88a1a61..0c00c42c204 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/chart-options/chart-options.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options.md @@ -27,7 +27,7 @@ aliases: | `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | | `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | | `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) level. 0 is off. [0-3] | +| `auditLog.level` | 0 | `int` - set the [API Audit Log](installation/api-auditing) level. 0 is off. 
[0-3] | | `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | @@ -50,15 +50,15 @@ aliases: ### API Audit Log -Enabling the [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing/). +Enabling the [API Audit Log](installation/api-auditing/). -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. +You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools](cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. ```plain --set auditLog.level=1 ``` -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the Rancher server cluster or System Project. +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. 
When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools](cluster-admin/tools/logging/) for the Rancher server cluster or System Project. Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. @@ -84,7 +84,7 @@ To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` a --set 'extraEnv[0].value=1.0' ``` -See [TLS settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/tls-settings) for more information and options. +See [TLS settings](admin-settings/tls-settings) for more information and options. ### Import `local` Cluster @@ -145,8 +145,8 @@ kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca- For details on installing Rancher with a private registry, see: -- [Air Gap: Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) +- [Air Gap: Docker Install](installation/air-gap-single-node/) +- [Air Gap: Kubernetes Install](installation/air-gap-high-availability/) ### External TLS Termination @@ -155,7 +155,7 @@ We recommend configuring your load balancer as a Layer 4 balancer, forwarding pl You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. 
If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/tls-secrets/) to add the CA cert for Rancher. +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](installation/options/helm2/helm-rancher/tls-secrets/) to add the CA cert for Rancher. Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/tls-secrets/tls-secrets.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/tls-secrets.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/tls-secrets/tls-secrets.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/tls-secrets.md diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/troubleshooting.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/troubleshooting.md diff --git 
a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/kubernetes-rke/troubleshooting.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/kubernetes-rke/troubleshooting.md diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/api-auditing/api-auditing.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing.md similarity index 85% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/api-auditing/api-auditing.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing.md index 9425665cd71..c91db38fc21 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/api-auditing/api-auditing.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing.md @@ -9,9 +9,9 @@ aliases: >**Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). 
> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. You can know what happened, when it happened, who initiated it, and what cluster it affected. API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. @@ -25,7 +25,7 @@ To enable API auditing: - Declare a `mountPath` in the `volumeMounts` directive of the container. - Declare a `path` in the `volumes` directive. -For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing). +For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing](installation/api-auditing). ```yaml ... 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/nlb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md similarity index 87% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/nlb.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md index be079813917..0b26617920c 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/nlb.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md @@ -9,9 +9,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. ## Objectives @@ -38,7 +38,7 @@ Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get st The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. 
Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} +![EC2 Load Balancing section](/img/ha/nlb/ec2-loadbalancing.png) Click **Create target group** to create the first target group, regarding TCP port 443. @@ -64,11 +64,11 @@ Success codes | `200-399` *** **Screenshot Target group TCP port 443 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} +![](/img/ha/nlb/create-targetgroup-443.png) *** **Screenshot Target group TCP port 443 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} +![](/img/ha/nlb/create-targetgroup-443-advanced.png) *** @@ -96,11 +96,11 @@ Success codes | `200-399` *** **Screenshot Target group TCP port 80 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} +![](/img/ha/nlb/create-targetgroup-80.png) *** **Screenshot Target group TCP port 80 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} +![](/img/ha/nlb/create-targetgroup-80-advanced.png) *** @@ -110,19 +110,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} +![](/img/ha/nlb/edit-targetgroup-443.png) Select the instances (Linux nodes) you want to add, and click **Add to registered**. *** **Screenshot Add targets to target group TCP port 443**
-{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} +![](/img/ha/nlb/add-targets-targetgroup-443.png) *** **Screenshot Added targets to target group TCP port 443**
-{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} +![](/img/ha/nlb/added-targets-targetgroup-443.png) When the instances are added, click **Save** on the bottom right of the screen. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/alb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb.md similarity index 92% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/alb.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb.md index cda6cd4f1d9..4a90ac33792 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/alb.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb.md @@ -9,9 +9,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install Kubernetes Rancher. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher helm chart to install Kubernetes Rancher. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
## Objectives diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/nginx.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx.md similarity index 71% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/nginx.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx.md index c1e1c8024fb..32a854ae91a 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/nginx.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx.md @@ -9,9 +9,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
## Install NGINX @@ -21,7 +21,7 @@ For help installing NGINX, refer to their [install documentation](https://www.ng ## Create NGINX Configuration -See [Example NGINX config]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#example-nginx-config). +See [Example NGINX config](installation/options/helm2/helm-rancher/chart-options/#example-nginx-config). ## Run NGINX diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/proxy/proxy.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy.md similarity index 82% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/proxy/proxy.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy.md index 80cf52b95b8..31c49041632 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/proxy/proxy.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy.md @@ -8,9 +8,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. @@ -28,8 +28,8 @@ NO_PROXY | Network address(es), network address range(s) and do When using Kubernetes installation, the environment variables need to be added to the RKE Config File template. -* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/#5-download-rke-config-file-template) -* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/#5-download-rke-config-file-template) +* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template](../../../../../../pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md#5-download-rke-config-file-template) +* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template](../../../../../../pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md#5-download-rke-config-file-template) The environment variables should be defined in the `Deployment` inside the RKE Config File Template. You only have to add the part starting with `env:` to (but not including) `ports:`. Make sure the indentation is identical to the preceding `name:`. 
Required values for `NO_PROXY` are: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/404-default-backend.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md similarity index 90% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/404-default-backend.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md index 0c2697ec112..bc3b683ab7c 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/404-default-backend.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md @@ -10,9 +10,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/generic-troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting.md similarity index 88% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/generic-troubleshooting.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting.md index 9019f0b737d..170557ccca7 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/generic-troubleshooting.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting.md @@ -9,15 +9,15 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. Below are steps that you can follow to determine what is wrong in your cluster. ### Double check if all the required ports are opened in your (host) firewall -Double check if all the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. +Double check if all the [required ports](../../../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. ### All nodes should be present and in **Ready** state @@ -145,7 +145,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition => End ``` -If you see error in the output, that means that the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened between the hosts indicated. +If you see error in the output, that means that the [required ports](../../../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for overlay networking are not opened between the hosts indicated. Example error output of a situation where NODE1 had the UDP ports blocked. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/job-complete-status.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md similarity index 90% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/job-complete-status.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md index f6591e3cd78..e32b4dfadb9 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/job-complete-status.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md @@ -9,9 +9,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/firewall/firewall.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md similarity index 96% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/firewall/firewall.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md index 67c6f880325..fff3c153897 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/firewall/firewall.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md @@ -32,7 +32,7 @@ You can check the default firewall rules with this command: sudo iptables --list ``` -This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.0-v2.4/en/installation/references) for nodes in a high-availability Rancher server cluster. +This section describes how to use `firewalld` to apply the [firewall port rules](installation/references) for nodes in a high-availability Rancher server cluster. 
# Prerequisite diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/rke-add-on/layer-4-lb/layer-4-lb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md similarity index 94% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/rke-add-on/layer-4-lb/layer-4-lb.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md index dfa49037e43..6667b4e6fee 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/rke-add-on/layer-4-lb/layer-4-lb.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md @@ -10,9 +10,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install](../../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. 
The setup is based on: @@ -22,7 +22,7 @@ This procedure walks you through setting up a 3-node cluster using the Rancher K In an HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers -![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) +![Rancher HA](/img/ha/rancher2ha.svg) ## Installation Outline @@ -49,11 +49,11 @@ Installation of Rancher in a high-availability configuration involves multiple p ## 1. Provision Linux Hosts -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +Provision three Linux hosts according to our [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). ## 2. Configure Load Balancer -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb) +We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](../../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) >**Note:** > In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. 
@@ -153,7 +153,7 @@ Choose a fully qualified domain name (FQDN) that you want to use to access Ranch RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. 2. Confirm that RKE is now executable by running the following command: @@ -167,8 +167,8 @@ RKE uses a `.yml` config file to install and configure your Kubernetes cluster. 1. Download one of following templates, depending on the SSL certificate you're using. - - [Template for self-signed certificate
]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate) - - [Template for certificate signed by recognized CA
]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca) + - [Template for self-signed certificate
](installation/options/cluster-yml-templates/3-node-certificate) + - [Template for certificate signed by recognized CA
](installation/options/cluster-yml-templates/3-node-certificate-recognizedca) @@ -185,7 +185,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. + > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. nodes: # The IP address or hostname of the node @@ -226,7 +226,7 @@ Choose from the following options: > >- The certificate files must be in PEM format. >- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) 1. 
In `kind: Secret` with `name: cattle-keys-ingress`: @@ -391,8 +391,8 @@ During installation, RKE automatically generates a config file named `kube_confi You have a couple of options: -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). +- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore](installation/backups-and-restoration/ha-backup-and-restoration). +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](../../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md).
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/rke-add-on/layer-7-lb/layer-7-lb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md similarity index 89% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/rke-add-on/layer-7-lb/layer-7-lb.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md index c743c28edd3..d6fbab65f85 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/rke-add-on/layer-7-lb/layer-7-lb.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md @@ -10,9 +10,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install](../../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. 
The setup is based on: @@ -22,7 +22,7 @@ This procedure walks you through setting up a 3-node cluster using the Rancher K In an HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. Rancher installed on a Kubernetes cluster with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) +![Rancher HA](/img/ha/rancher2ha-l7.svg) ## Installation Outline @@ -47,7 +47,7 @@ Installation of Rancher in a high-availability configuration involves multiple p ## 1. Provision Linux Hosts -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +Provision three Linux hosts according to our [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). ## 2. Configure Load Balancer @@ -70,8 +70,8 @@ Health checks can be executed on the `/healthz` endpoint of the node, this will We have example configurations for the following load balancers: -* [Amazon ELB configuration]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/) -* [NGINX configuration]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/) +* [Amazon ELB configuration](../../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) +* [NGINX configuration](../../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) ## 3. Configure DNS @@ -101,7 +101,7 @@ Choose a fully qualified domain name (FQDN) that you want to use to access Ranch RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. 
We will use RKE to setup our cluster and run Rancher. -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. 2. Confirm that RKE is now executable by running the following command: @@ -115,8 +115,8 @@ RKE uses a YAML config file to install and configure your Kubernetes cluster. Th 1. Download one of following templates, depending on the SSL certificate you're using. - - [Template for self-signed certificate
`3-node-externalssl-certificate.yml`]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate) - - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca) + - [Template for self-signed certificate
`3-node-externalssl-certificate.yml`](installation/options/cluster-yml-templates/3-node-externalssl-certificate) + - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`](installation/options/cluster-yml-templates/3-node-externalssl-recognizedca) @@ -134,7 +134,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec >**Note:** > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. + >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. nodes: # The IP address or hostname of the node @@ -174,7 +174,7 @@ Choose from the following options: > >- The certificate files must be in PEM format. >- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) @@ -282,8 +282,8 @@ During installation, RKE automatically generates a config file named `kube_confi ## What's Next? 
-- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration]({{}}/rancher/v2.0-v2.4/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/). +- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration](backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. +- Create a Kubernetes cluster: [Creating a Cluster](tasks/clusters/creating-a-cluster/).
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/etcd/etcd.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/etcd/etcd.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md diff --git a/versioned_docs/version-2.0-2.4/installation/resources/feature-flags/istio-virtual-service-ui/istio-virtual-service-ui.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md similarity index 91% rename from versioned_docs/version-2.0-2.4/installation/resources/feature-flags/istio-virtual-service-ui/istio-virtual-service-ui.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md index 7159860f4b4..0c81703ab5b 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/feature-flags/istio-virtual-service-ui/istio-virtual-service-ui.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md @@ -7,9 +7,9 @@ aliases: This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. -> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup) in order to use the feature. +> **Prerequisite:** Turning on this feature does not enable Istio. 
A cluster administrator needs to [enable Istio for the cluster](../../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](installation/options/feature-flags/) Environment Variable Key | Default Value | Status | Available as of ---|---|---|--- diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/arm64-platform/arm64-platform.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md similarity index 81% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/arm64-platform/arm64-platform.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md index 03bcfbede09..776ce5b1ee6 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/arm64-platform/arm64-platform.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md @@ -12,7 +12,7 @@ aliases: The following options are available when using an ARM64 platform: - Running Rancher on ARM64 based node(s) - - Only for Docker Install. Please note that the following installation command replaces the examples found in the [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) link: + - Only for Docker Install. 
Please note that the following installation command replaces the examples found in the [Docker Install](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md) link: ``` # In the last line `rancher/rancher:vX.Y.Z`, be certain to replace "X.Y.Z" with a released version in which ARM64 builds exist. For example, if your matching version is v2.5.8, you would fill in this line with `rancher/rancher:v2.5.8`. @@ -30,12 +30,12 @@ The following options are available when using an ARM64 platform: - Create custom cluster and adding ARM64 based node(s) - Kubernetes cluster version must be 1.12 or higher - - CNI Network Provider must be [Flannel]({{}}/rancher/v2.0-v2.4/en/faq/networking/cni-providers/#flannel) + - CNI Network Provider must be [Flannel](../../../../faq/container-network-interface-providers.md#flannel) - Importing clusters that contain ARM64 based nodes - Kubernetes cluster version must be 1.12 or higher -Please see [Cluster Options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/) for information on how to configure the cluster options. +Please see [Cluster Options](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) for information on how to configure the cluster options. 
The following features are not tested: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/feature-flags/enable-not-default-storage-drivers/enable-not-default-storage-drivers.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md similarity index 92% rename from versioned_docs/version-2.0-2.4/installation/resources/feature-flags/enable-not-default-storage-drivers/enable-not-default-storage-drivers.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md index e52edb1e601..bd22afaed5d 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/feature-flags/enable-not-default-storage-drivers/enable-not-default-storage-drivers.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md @@ -7,7 +7,7 @@ aliases: This feature allows you to use types for storage providers and provisioners that are not enabled by default. 
-To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](installation/options/feature-flags/) Environment Variable Key | Default Value | Description ---|---|--- diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/rollbacks/rollbacks.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md similarity index 89% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/rollbacks/rollbacks.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md index 9596006b5cc..f53434c344b 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/rollbacks/rollbacks.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -12,9 +12,9 @@ aliases: ### Rolling Back to Rancher v2.2-v2.4 -For Rancher installed on Kubernetes, follow the procedure detailed here: [Restoring Backups for Kubernetes installs.]({{}}/rancher/v2.0-v2.4/en/backups/restorations/ha-restoration) Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. +For Rancher installed on Kubernetes, follow the procedure detailed here: [Restoring Backups for Kubernetes installs.](backups/restorations/ha-restoration) Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. 
-For information on how to roll back Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks) +For information on how to roll back Rancher installed with Docker, refer to [this page.](../other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md) > Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. @@ -41,7 +41,7 @@ Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.o 2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** -3. Rollback Rancher following the [normal instructions]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/). +3. Rollback Rancher following the [normal instructions](upgrades/rollbacks/). 4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/helm2/helm2.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md similarity index 75% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/helm2/helm2.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md index 868aab2c23f..37c6c7433e1 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/helm2/helm2.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md @@ -21,20 +21,20 @@ import TabItem from '@theme/TabItem'; The following instructions will guide you through using Helm to upgrade a Rancher server that is installed on a Kubernetes cluster. -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. 
+To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. -If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on). +If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade](upgrades/upgrades/migrating-from-rke-add-on). >**Notes:** > -> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager) -> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) -> - The upgrade instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. 
+> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.](installation/options/upgrading-cert-manager) +> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.](../../../../reference-guides/installation-references/helm-chart-options.md#configuring-ingress-for-external-tls-when-using-nginx-v0-25) +> - The upgrade instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section](installation/upgrades-rollbacks/upgrades/ha/helm2) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. # Prerequisites -- **Review the [known upgrade issues]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) -- **For [air gap installs only,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. 
+- **Review the [known upgrade issues](upgrades/upgrades)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) +- **For [air gap installs only,](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry](../../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. # Upgrade Outline @@ -47,7 +47,7 @@ Follow the steps to upgrade Rancher server: ### A. Back up Your Kubernetes Cluster that is Running Rancher Server -[Take a one-time snapshot]({{}}/rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-b-one-time-snapshots) +[Take a one-time snapshot](backups/v2.0.x-v2.4.x/backup/rke-backups/#option-b-one-time-snapshots) of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restore point if something goes wrong during upgrade. ### B. Update the Helm chart repository @@ -60,7 +60,7 @@ of your Kubernetes cluster running Rancher server. You'll use the snapshot as a 1. Get the repository name that you used to install Rancher. - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). + For information about the repos and their differences, see [Helm Chart Repositories](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). {{< release-channel >}} @@ -72,7 +72,7 @@ of your Kubernetes cluster running Rancher server. 
You'll use the snapshot as a rancher- https://releases.rancher.com/server-charts/ ``` - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../../resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. 1. Fetch the latest chart to install Rancher from the Helm chart repository. @@ -128,7 +128,7 @@ If you are currently running the cert-manager whose version is older than v0.11, ``` In case this results in an error that the release "rancher" was not found, make sure you are using the correct deployment name. Use `helm list` to list the helm-deployed releases. -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions) page. +2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager](installation/options/upgrading-cert-manager/helm-2-instructions) page. 3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. @@ -217,8 +217,8 @@ Log into Rancher to confirm that the upgrade succeeded. 
>**Having network issues following upgrade?** > -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). +> See [Restoring Cluster Networking](namespace-migration.md#restoring-cluster-networking). ## Rolling Back -Should something go wrong, follow the [roll back]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. +Should something go wrong, follow the [roll back](upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you performed the upgrade. diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/migrating-from-rke-add-on.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md similarity index 89% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/migrating-from-rke-add-on.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md index 50b771e626d..68138db7a11 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/migrating-from-rke-add-on.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md @@ -21,7 +21,7 @@ You will need the to have [kubectl](https://kubernetes.io/docs/tasks/tools/insta > **Note:** This guide assumes a standard Rancher install. If you have modified any of the object names or namespaces, please adjust accordingly.
-> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. +> **Note:** If you are upgrading from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps](../../../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. ### Point kubectl at your Rancher Cluster @@ -61,7 +61,7 @@ kubectl -n cattle-system get secret cattle-keys-server -o jsonpath --template='{ Remove the Kubernetes objects created by the RKE install. -> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/backups/backups/ha-backups) for details. +> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install](backups/backups/ha-backups) for details. ``` kubectl -n cattle-system delete ingress cattle-ingress-http @@ -109,5 +109,5 @@ addons: |- From here follow the standard install steps.
-* [3 - Initialize Helm]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) -* [4 - Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/) +* [3 - Initialize Helm](installation/options/helm2/helm-init/) +* [4 - Install Rancher](installation/options/helm2/helm-rancher/) diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/namespace-migration/namespace-migration.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md similarity index 93% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/namespace-migration/namespace-migration.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md index 2ac6b85a05b..6681b1dc6ce 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/namespace-migration/namespace-migration.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md @@ -34,7 +34,7 @@ During upgrades from Rancher v2.0.6- to Rancher v2.0.7+, all system namespaces a - To prevent this issue from occurring before the upgrade, see [Preventing Cluster Networking Issues](#preventing-cluster-networking-issues). - To fix this issue following upgrade, see [Restoring Cluster Networking](#restoring-cluster-networking). -> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. 
+> **Note:** If you are upgrading from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps](../../../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. ## Preventing Cluster Networking Issues @@ -61,11 +61,11 @@ You can prevent cluster networking issues from occurring during your upgrade to >1 Only displays if this feature is enabled for the cluster.
Moving namespaces out of projects
- ![Moving Namespaces]({{}}/img/rancher/move-namespaces.png) + ![Moving Namespaces](/img/move-namespaces.png) 1. Repeat these steps for each cluster where you've assigned system namespaces to projects. -**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades). +**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade](upgrades/upgrades). ## Restoring Cluster Networking @@ -181,8 +181,8 @@ Reset the cluster nodes' network policies to restore connectivity. If you can access Rancher, but one or more of the clusters that you launched using Rancher has no networking, you can repair them by moving them: -- Using the cluster's [embedded kubectl shell]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/kubectl/). -- By [downloading the cluster kubeconfig file and running it]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl) from your workstation. +- Using the cluster's [embedded kubectl shell](k8s-in-rancher/kubectl/). +- By [downloading the cluster kubeconfig file and running it](../../../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) from your workstation. 
``` for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do diff --git a/versioned_docs/version-2.0-2.4/installation/requirements/installing-docker/installing-docker.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/install-docker.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/requirements/installing-docker/installing-docker.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/install-docker.md diff --git a/versioned_docs/version-2.0-2.4/installation/requirements/ports/ports.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md similarity index 90% rename from versioned_docs/version-2.0-2.4/installation/requirements/ports/ports.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md index 85df80477ff..dfbe84679ad 100644 --- a/versioned_docs/version-2.0-2.4/installation/requirements/ports/ports.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md @@ -143,13 +143,13 @@ The following tables break down the port requirements for Rancher nodes, for inb Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. -The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). +The port requirements differ depending on how the downstream cluster was launched. 
Each of the tabs below lists the ports that need to be opened for different [cluster types](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning). +The following diagram depicts the ports that are opened for each [cluster type](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md).
Port Requirements for the Rancher Management Plane
-![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) +![Basic Port Requirements](/img/port-communications.svg) >**Tip:** > @@ -160,7 +160,7 @@ The following diagram depicts the ports that are opened for each [cluster type](
Click to expand -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/). +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with nodes created in an [Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). >**Note:** >The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. @@ -174,7 +174,7 @@ The following table depicts the port requirements for [Rancher Launched Kubernet
Click to expand -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/). +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with [Custom Nodes](../../../pages-for-subheaders/use-existing-nodes.md). {{< ports-custom-nodes >}} @@ -185,7 +185,7 @@ The following table depicts the port requirements for [Rancher Launched Kubernet
Click to expand -The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters). +The following table depicts the port requirements for [hosted clusters](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md). {{< ports-imported-hosted >}} @@ -197,7 +197,7 @@ The following table depicts the port requirements for [hosted clusters]({{ Click to expand -The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/). +The following table depicts the port requirements for [imported clusters](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md). {{< ports-imported-hosted >}} @@ -210,7 +210,7 @@ The following table depicts the port requirements for [imported clusters]({{ @@ -230,7 +230,7 @@ In these cases, you have to explicitly allow this traffic in your host firewall, ### Rancher AWS EC2 Security Group -When using the [AWS EC2 node driver]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. +When using the [AWS EC2 node driver](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
| Type | Protocol | Port Range | Source/Destination | Rule Type | |-----------------|:--------:|:-----------:|------------------------|:---------:| diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md similarity index 86% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md index 9c4f4d00a5a..062dd65c9c3 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -12,7 +12,7 @@ In this section, you will provision the underlying infrastructure for your Ranch An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. -The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/) +The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. 
For more information on each installation option, refer to [this page.](../../../../pages-for-subheaders/installation-and-upgrade.md) @@ -29,9 +29,9 @@ We recommend setting up the following infrastructure for a high-availability ins These hosts will be disconnected from the internet, but require being able to connect with your private registry. -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. ### 2. Set up External Datastore @@ -45,9 +45,9 @@ For a high-availability K3s installation, you will need to set up one of the fol When you install Kubernetes, you will pass in details for K3s to connect to the database. -For an example of one way to set up the database, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/rds) for setting up a MySQL database on Amazon's RDS service. +For an example of one way to set up the database, refer to this [tutorial](installation/options/rds) for setting up a MySQL database on Amazon's RDS service. -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) ### 3. 
Set up the Load Balancer @@ -60,11 +60,11 @@ When Rancher is installed (also in a later step), the Rancher system creates an For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. 
Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. @@ -83,7 +83,7 @@ For a how-to guide for setting up a DNS record to route domain traffic to an Ama Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. 
+In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) with details from this registry. If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) @@ -109,9 +109,9 @@ The etcd database requires an odd number of nodes so that it can always elect a These hosts will be disconnected from the internet, but require being able to connect with your private registry. -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. ### 2. Set up the Load Balancer @@ -124,11 +124,11 @@ When Rancher is installed (also in a later step), the Rancher system creates an For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. 
The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. 
For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. @@ -147,7 +147,7 @@ For a how-to guide for setting up a DNS record to route domain traffic to an Ama Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. -In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file]({{}}/rke/latest/en/config-options/private-registries/) with details from this registry. +In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) with details from this registry. 
If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) @@ -163,9 +163,9 @@ If you need help with creating a private registry, please refer to the [official This host will be disconnected from the Internet, but needs to be able to connect to your private registry. -Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. ### 2. 
Set up a Private Docker Registry @@ -176,4 +176,4 @@ If you need help with creating a private registry, please refer to the [official -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) +### [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md similarity index 86% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md index 158a055e88b..fe0b6c9f2c3 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -10,9 +10,9 @@ import TabItem from '@theme/TabItem'; > Skip this section if you are installing Rancher on a single node with Docker. -This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. 
+This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. -For Rancher before v2.4, Rancher should be installed on an [RKE]({{}}/rke/latest/en/) (Rancher Kubernetes Engine) Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. +For Rancher before v2.4, Rancher should be installed on an [RKE](https://rancher.com/docs/rke/latest/en/) (Rancher Kubernetes Engine) Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. In Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. The Rancher management server can only be run on a Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. @@ -64,7 +64,7 @@ configs: Note, at this time only secure registries are supported with K3s (SSL with custom CA). -For more information on private registries configuration file for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/private-registry/) +For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) ### 3. 
Install K3s @@ -133,7 +133,7 @@ users: kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces ``` -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. ### Note on Upgrading @@ -150,7 +150,7 @@ We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Befor ### 1. Install RKE -Install RKE by following the instructions in the [RKE documentation.]({{}}/rke/latest/en/installation/) +Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) ### 2. Create an RKE Config File @@ -158,9 +158,9 @@ From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts) you created. +Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes](installation/air-gap-high-availability/provision-hosts) you created. -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). 
+> **Tip:** For more details on the options available, see the RKE [Config Options](https://rancher.com/docs/rke/latest/en/config-options/).
RKE Options
@@ -215,8 +215,8 @@ rke up --config ./rancher-cluster.yml Save a copy of the following files in a secure location: - `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster; this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state); this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ @@ -225,6 +225,6 @@ Save a copy of the following files in a secure location: ### Issues or errors? -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) page. +See the [Troubleshooting](installation/options/troubleshooting/) page. -### [Next: Install Rancher](../install-rancher) +### [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/install-rancher/install-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md similarity index 90% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/install-rancher/install-rancher.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md index 48f312f546d..954279d62d6 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/install-rancher/install-rancher.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -31,9 +31,9 @@ This section describes installing Rancher in five parts: From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. -1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +1. If you haven't already, install `helm` locally on a workstation that has internet access. 
Note: Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). {{< release-channel >}} ``` helm repo add rancher- https://releases.rancher.com/server-charts/ @@ -55,7 +55,7 @@ Rancher Server is designed to be secure by default and requires SSL/TLS configur When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). | Configuration | Chart option | Description | Requires cert-manager | | ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | @@ -82,7 +82,7 @@ Based on the choice your made in [B. 
Choose your SSL Configuration](#b-choose-yo By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. > **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](installation/options/upgrading-cert-manager/). 1. From a system connected to the internet, add the cert-manager repo to Helm. ```plain @@ -174,7 +174,7 @@ If you are using a Private CA signed cert, add `--set privateCA=true` following **Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. +Then refer to [Adding TLS Secrets](installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them.
@@ -221,19 +221,19 @@ kubectl -n cattle-system apply -R -f ./rancher ``` **Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.0-v2.4/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. +> **Note:** If you don't intend to send telemetry data, opt out of [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. # 5. For Rancher versions before v2.3.0, Configure System Charts -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/). +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in GitHub, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](../../resources/local-system-charts.md). 
# Additional Resources These resources could be helpful when installing Rancher: -- [Rancher Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) +- [Rancher Helm chart options](installation/resources/chart-options/) +- [Adding TLS secrets](installation/resources/encryption/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations](installation/options/troubleshooting/) @@ -253,10 +253,10 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher > **Do you want to...** > -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/custom-ca-root-certificate/). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](installation/options/custom-ca-root-certificate/). +> - Record all transactions with the Rancher API? See [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log). -- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/) +- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. 
Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.](../../resources/local-system-charts.md) Choose from the following options: @@ -272,7 +272,7 @@ Log into your Linux host, and then run the installation command below. When ente | Placeholder | Description | | -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | | `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to install. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | ``` docker run -d --restart=unless-stopped \ @@ -295,7 +295,7 @@ In development or testing environments where your team will access your Rancher > From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. > > - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) After creating your certificate, log into your Linux host, and then run the installation command below. 
When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. @@ -306,7 +306,7 @@ After creating your certificate, log into your Linux host, and then run the inst | `` | The path to the private key for your certificate. | | `` | The path to the certificate authority's certificate. | | `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to install. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | @@ -340,7 +340,7 @@ After obtaining your certificate, log into your Linux host, and then run the ins | `` | The path to your full certificate chain. | | `` | The path to the private key for your certificate. | | `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to install. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | > **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. @@ -359,9 +359,9 @@ docker run -d --restart=unless-stopped \ If you are installing Rancher v2.3.0+, the installation is complete. -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.0-v2.4/en/faq/telemetry/) during the initial login. +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. 
Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/). +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](../../resources/local-system-charts.md). diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md similarity index 92% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md index a2f939e801d..41657d71230 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md @@ -13,11 +13,11 @@ import TabItem from '@theme/TabItem'; This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. 
In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. +By default, all images used to [provision Kubernetes clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) or launch any [tools](../../../../reference-guides/rancher-cluster-tools.md) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. -The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. +The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes](../../../../pages-for-subheaders/use-windows-clusters.md), there are separate instructions to support the images needed. 
> **Prerequisites:** > @@ -61,7 +61,7 @@ In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). ```plain helm repo add jetstack https://charts.jetstack.io @@ -236,7 +236,7 @@ The workstation must have Docker 18.02+ in order to support manifests, which are **For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). 
```plain helm repo add jetstack https://charts.jetstack.io helm repo update @@ -296,6 +296,6 @@ chmod +x rancher-load-images.sh -### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) +### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster](install-kubernetes.md) -### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) +### [Next step for Docker Installs - Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/launch-kubernetes/launch-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md similarity index 87% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/launch-kubernetes/launch-kubernetes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md index 851e501a4f3..406364ad1e3 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/launch-kubernetes/launch-kubernetes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md @@ -62,7 +62,7 @@ sudo systemctl restart docker You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster: -* [RKE CLI binary]({{}}/rke/latest/en/installation/#download-the-rke-binary) +* [RKE CLI binary](https://rancher.com/docs/rke/latest/en/installation/#download-the-rke-binary) ``` sudo curl -fsSL -o /usr/local/bin/rke 
https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64 @@ -85,7 +85,7 @@ chmod +x get_helm.sh sudo ./get_helm.sh ``` -Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation]({{}}/rke/latest/en/example-yamls/). +Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). ``` nodes: @@ -139,13 +139,13 @@ default backend - 404 Save a copy of the following files in a secure location: - `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. ### Issues or errors? -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) page. 
+See the [Troubleshooting](installation/options/troubleshooting/) page. -### [Next: Install Rancher](../install-rancher) +### [Next: Install Rancher](install-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/install-rancher/install-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md similarity index 82% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/install-rancher/install-rancher.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md index c0f70adda0e..5e8232e0608 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/install-rancher/install-rancher.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md @@ -75,12 +75,12 @@ kubectl rollout status deployment -n cattle-system rancher You can now navigate to `https://rancher.example.com` and start using Rancher. -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.0-v2.4/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. 
### Additional Resources These resources could be helpful when installing Rancher: -- [Rancher Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) +- [Rancher Helm chart options](installation/resources/chart-options/) +- [Adding TLS secrets](installation/resources/encryption/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations](installation/options/troubleshooting/) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/prepare-nodes/prepare-nodes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md similarity index 88% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/prepare-nodes/prepare-nodes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md index eb8ab34b4e2..fcd7ad6a066 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/prepare-nodes/prepare-nodes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md @@ -23,9 +23,9 @@ The etcd database requires an odd number of nodes so that it can always elect a These hosts will connect to the internet through an HTTP proxy. 
-Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. ### 2. Set up the Load Balancer @@ -38,11 +38,11 @@ When Rancher is installed (also in a later step), the Rancher system creates an For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. 
If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. 
Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. @@ -58,4 +58,4 @@ You will need to specify this hostname in a later step when you install Rancher, For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) -### [Next: Set up a Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) +### [Next: Set up a Kubernetes cluster](install-kubernetes.md) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md similarity index 89% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md rename to 
versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md index edd7af34feb..d0bfbfb1a2a 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md @@ -6,7 +6,7 @@ aliases: - /rancher/v2.0-v2.4/en/upgrades/rollbacks/single-node-rollbacks --- -If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade). Rolling back restores: +If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade](upgrades/upgrades/single-node-upgrade). Rolling back restores: - Your previous version of Rancher. - Your data backup created before upgrade. @@ -23,7 +23,7 @@ In this command, `` is the version of Rancher you were ru Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. 
-Terminal docker ps Command, Displaying Where to Find <PRIOR_RANCHER_VERSION> and <RANCHER_CONTAINER_NAME>![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) +Terminal docker ps Command, Displaying Where to Find <PRIOR_RANCHER_VERSION> and <RANCHER_CONTAINER_NAME>![Placeholder Reference](/img/placeholder-ref-2.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | ------------------------------------------------------- | @@ -58,9 +58,9 @@ If you have issues upgrading Rancher, roll it back to its latest known healthy s ``` You can obtain the name for your Rancher container by entering `docker ps`. -1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. +1. Move the backup tarball that you created during completion of [Docker Upgrade](upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). + If you followed the naming convention we suggested in [Docker Upgrade](upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). 1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. 
diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md similarity index 83% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md index b3d2643849d..ec1d1343e57 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md @@ -16,8 +16,8 @@ The following instructions will guide you through upgrading a Rancher server tha # Prerequisites -- **Review the [known upgrade issues]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren’t supported. 
-- **For [air gap installs only,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. +- **Review the [known upgrade issues](../../../../pages-for-subheaders/upgrades.md#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories/) aren’t supported. +- **For [air gap installs only,](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry](../air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. # Placeholder Review @@ -43,7 +43,7 @@ Write down or copy this information before starting the upgrade. Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) +![Placeholder Reference](/img/placeholder-ref.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | --------------------------------------------------------- | @@ -110,7 +110,7 @@ Pull the image of the Rancher version that you want to upgrade to. 
Placeholder | Description ------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. ``` docker pull rancher/rancher: @@ -122,11 +122,11 @@ Start a new Rancher server container using the data from the `rancher-data` cont >**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. -If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/) +If you used a proxy, see [HTTP Proxy Configuration.](../../../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) -If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) +If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) -If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) +If you are recording all transactions with the Rancher API, see [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) To see the command to use when starting the new Rancher server container, choose from the following options: @@ -147,7 +147,7 @@ If you have selected to use the Rancher generated self-signed certificate, you a Placeholder | Description ------------|------------- 
-`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -173,7 +173,7 @@ Placeholder | Description `` | The path to your full certificate chain. `` | The path to the private key for your certificate. `` | The path to the certificate authority's certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -195,14 +195,14 @@ docker run -d --volumes-from rancher-data \ If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. 
For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) Placeholder | Description ------------|------------- `` | The path to the directory containing your certificate files. `` | The path to your full certificate chain. `` | The path to the private key for your certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -232,7 +232,7 @@ If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificat Placeholder | Description ------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. `` | The domain address that you had originally started with ``` @@ -250,7 +250,7 @@ docker run -d --volumes-from rancher-data \ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. -> For Rancher versions from v2.2.0 to v2.2.x, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/) +> For Rancher versions from v2.2.0 to v2.2.x, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. 
For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.](../../resources/local-system-charts.md) When starting the new Rancher server container, choose from the following options: @@ -264,7 +264,7 @@ If you have selected to use the Rancher generated self-signed certificate, you a Placeholder | Description ------------|------------- `` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. ``` docker run -d --volumes-from rancher-data \ @@ -284,7 +284,7 @@ Placeholder | Description If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. ->**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +>**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) Placeholder | Description ------------|------------- @@ -293,7 +293,7 @@ Placeholder | Description `` | The path to the private key for your certificate. `` | The path to the certificate authority's certificate.
`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. ``` docker run -d --restart=unless-stopped \ @@ -314,7 +314,7 @@ docker run -d --restart=unless-stopped \ If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. - >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) + >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) Placeholder | Description ------------|------------- @@ -322,7 +322,7 @@ Placeholder | Description `` | The path to your full certificate chain. `` | The path to the private key for your certificate. `` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. 
> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. @@ -351,7 +351,7 @@ Log into Rancher. Confirm that the upgrade succeeded by checking the version dis >**Having network issues in your user clusters following upgrade?** > -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). +> See [Restoring Cluster Networking](../../install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md#restoring-cluster-networking). # 6. Clean up Your Old Rancher Server Container @@ -360,4 +360,4 @@ Remove the previous Rancher server container. If you only stop the previous Ranc # Rolling Back -If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/single-node-rollbacks/). +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback](upgrades/rollbacks/single-node-rollbacks/). 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/tls-secrets/tls-secrets.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/add-tls-secrets.md similarity index 83% rename from versioned_docs/version-2.0-2.4/installation/resources/tls-secrets/tls-secrets.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/add-tls-secrets.md index 351dc19b5ae..f9b2d4a4352 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/tls-secrets/tls-secrets.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/add-tls-secrets.md @@ -35,4 +35,4 @@ kubectl -n cattle-system create secret generic tls-ca \ # Updating a Private CA Certificate -Follow the steps on [this page]({{}}/rancher/v2.0-v2.4/en/installation/resources/update-rancher-cert) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. \ No newline at end of file +Follow the steps on [this page](update-rancher-certificate.md) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/choosing-version/choosing-version.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md similarity index 87% rename from versioned_docs/version-2.0-2.4/installation/resources/choosing-version/choosing-version.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md index 02b73f06748..8b8fbcab31d 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/choosing-version/choosing-version.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md @@ -10,16 +10,16 @@ import TabItem from '@theme/TabItem'; This section describes how to choose a Rancher version. -For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** -When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/), Rancher server is installed using a Helm chart on a Kubernetes cluster. 
Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. +When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. -Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. ### Helm Chart Repositories @@ -79,12 +79,12 @@ After installing Rancher, if you want to change which Helm chart repository to i helm repo add rancher- https://releases.rancher.com/server-charts/ ``` -4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha) from the new Helm chart repository. +4. Continue to follow the steps to [upgrade Rancher](installation/upgrades-rollbacks/upgrades/ha) from the new Helm chart repository. -When performing [Docker installs]({{}}/rancher/v2.0-v2.4/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. +When performing [Docker installs](installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. 
### Server Tags diff --git a/versioned_docs/version-2.0-2.4/installation/resources/custom-ca-root-certificate/custom-ca-root-certificate.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md similarity index 80% rename from versioned_docs/version-2.0-2.4/installation/resources/custom-ca-root-certificate/custom-ca-root-certificate.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md index 6474fc305ab..892d6ae6e76 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/custom-ca-root-certificate/custom-ca-root-certificate.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md @@ -22,7 +22,7 @@ Examples of services that Rancher can access: For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: -- [Docker install Custom CA certificate options]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) +- [Docker install Custom CA certificate options](../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) -- [Kubernetes install options for Additional Trusted CAs]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#additional-trusted-cas) +- [Kubernetes install options for Additional Trusted CAs](../../../reference-guides/installation-references/helm-chart-options.md#additional-trusted-cas) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/helm-version/helm-version.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/helm-version-requirements.md similarity index 78% rename from versioned_docs/version-2.0-2.4/installation/resources/helm-version/helm-version.md rename to 
versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/helm-version-requirements.md index dc0a2a72a13..e2165e7096c 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/helm-version/helm-version.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/helm-version-requirements.md @@ -10,7 +10,7 @@ aliases: This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. -> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. +> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section](installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. - Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/local-system-charts/local-system-charts.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/local-system-charts.md similarity index 87% rename from versioned_docs/version-2.0-2.4/installation/resources/local-system-charts/local-system-charts.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/local-system-charts.md index b42f98d8aa2..1778e59f1b2 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/local-system-charts/local-system-charts.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/local-system-charts.md @@ -18,7 +18,7 @@ In an air gapped installation of Rancher, you will need to configure Rancher to In Rancher v2.3.0, a local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. -Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-rancher/) instructions. +Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation](installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation](installation/air-gap-high-availability/install-rancher/) instructions. # Setting Up System Charts for Rancher Before v2.3.0 @@ -56,11 +56,11 @@ In the catalog management page in the Rancher UI, follow these steps: 1. 
Open `https:///v3/catalogs/system-library` in your browser. - {{< img "/img/rancher/airgap/system-charts-setting.png" "Open">}} + ![](/img/airgap/system-charts-setting.png) 1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - {{< img "/img/rancher/airgap/system-charts-update.png" "Update">}} + ![](/img/airgap/system-charts-update.png) 1. Click **Show Request** diff --git a/versioned_docs/version-2.0-2.4/installation/resources/update-rancher-cert/update-rancher-cert.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md similarity index 96% rename from versioned_docs/version-2.0-2.4/installation/resources/update-rancher-cert/update-rancher-cert.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md index f27a09a8825..f6b30a398b7 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/update-rancher-cert/update-rancher-cert.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md @@ -5,7 +5,7 @@ weight: 10 # Updating a Private CA Certificate -Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. +Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. A summary of the steps is as follows: @@ -58,7 +58,7 @@ $ kubectl -n cattle-system create secret generic tls-ca \ ## 3. 
Reconfigure the Rancher deployment -> Before proceeding, [generate an API token in the Rancher UI]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/#creating-an-api-key) (User > API & Keys). +> Before proceeding, [generate an API token in the Rancher UI](../../../reference-guides/user-settings/api-keys.md#creating-an-api-key) (User > API & Keys). This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). diff --git a/versioned_docs/version-2.0-2.4/installation/resources/upgrading-cert-manager/helm-2-instructions/helm-2-instructions.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md similarity index 96% rename from versioned_docs/version-2.0-2.4/installation/resources/upgrading-cert-manager/helm-2-instructions/helm-2-instructions.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md index 2a4ea7f70ed..7fb9e3a0474 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/upgrading-cert-manager/helm-2-instructions/helm-2-instructions.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md @@ -27,7 +27,7 @@ To address these changes, this guide will do two things: > The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. 
-> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. +> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart](installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. ## Upgrade Cert-Manager Only @@ -85,7 +85,7 @@ In order to upgrade cert-manager, follow these instructions: Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. +1. Follow the guide to [Prepare your Private Registry](installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. 1. 
From a system connected to the internet, add the cert-manager repo to Helm diff --git a/versioned_docs/version-2.0-2.4/installation/resources/upgrading-cert-manager/upgrading-cert-manager.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md similarity index 96% rename from versioned_docs/version-2.0-2.4/installation/resources/upgrading-cert-manager/upgrading-cert-manager.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md index e4e1b6ad47d..bb9758deb5e 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/upgrading-cert-manager/upgrading-cert-manager.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md @@ -27,13 +27,13 @@ To address these changes, this guide will do two things: > The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. +> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart](installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. # Upgrade Cert-Manager The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. 
Do not change the namespace cert-manager is running in or this can cause issues. -> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions) +> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.](installation/options/upgrading-cert-manager/helm-2-instructions) In order to upgrade cert-manager, follow these instructions: @@ -116,7 +116,7 @@ In order to upgrade cert-manager, follow these instructions: Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. +1. Follow the guide to [Prepare your Private Registry](installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. 1. 
From a system connected to the internet, add the cert-manager repo to Helm diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/upgrading-kubernetes/upgrading-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md similarity index 83% rename from versioned_docs/version-2.0-2.4/cluster-admin/upgrading-kubernetes/upgrading-kubernetes.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md index a0049bf6b55..e71a60861c9 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/upgrading-kubernetes/upgrading-kubernetes.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md @@ -8,7 +8,7 @@ import TabItem from '@theme/TabItem'; Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. -Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation]({{}}/rke/latest/en/). +Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation](https://rancher.com/docs/rke/latest/en/). This section covers the following topics: @@ -27,11 +27,11 @@ This section covers the following topics: # New Features -As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. 
For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata) +As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.](upgrade-kubernetes-without-upgrading-rancher.md) As of Rancher v2.4.0, -- The ability to import K3s Kubernetes clusters into Rancher was added, along with the ability to upgrade Kubernetes when editing those clusters. For details, refer to the [section on imported clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters) +- The ability to import K3s Kubernetes clusters into Rancher was added, along with the ability to upgrade Kubernetes when editing those clusters. For details, refer to the [section on imported clusters.](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) - New advanced options are exposed in the Rancher UI for configuring the upgrade strategy of an RKE cluster: **Maximum Worker Nodes Unavailable** and **Drain nodes.** These options leverage the new cluster upgrade process of RKE v1.1.0, in which worker nodes are upgraded in batches, so that applications can remain available during cluster upgrades, under [certain conditions.](#maintaining-availability-for-applications-during-upgrades) # Tested Kubernetes Versions @@ -42,7 +42,7 @@ Before a new version of Rancher is released, it's tested with the latest minor v RKE v1.1.0 changed the way that clusters are upgraded. -In this section of the [RKE documentation,]({{}}/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. +In this section of the [RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. 
# Recommended Best Practice for Upgrades @@ -76,8 +76,8 @@ The cluster cannot be downgraded to a previous Kubernetes version. > **Prerequisites:** > -> - The options below are available only for [Rancher-launched RKE Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) and imported/registered K3s Kubernetes clusters. -> - Before upgrading Kubernetes, [back up your cluster.]({{}}/rancher/v2.0-v2.4/en/backups) +> - The options below are available only for [Rancher-launched RKE Kubernetes clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) and imported/registered K3s Kubernetes clusters. +> - Before upgrading Kubernetes, [back up your cluster.](../../pages-for-subheaders/backup-restore-and-disaster-recovery.md) 1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. @@ -95,12 +95,12 @@ _Available as of v2.4_ A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: -- [Backing up a cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#how-snapshots-work) -- [Restoring a cluster from backup]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/#restoring-a-cluster-from-a-snapshot) +- [Backing up a cluster](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md#how-snapshots-work) +- [Restoring a cluster from backup](../../how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md#restoring-a-cluster-from-a-snapshot) # Configuring the Upgrade Strategy -As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements]({{}}/rke/latest/en/upgrades/maintaining-availability) are met. 
+As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability) are met. The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. @@ -128,7 +128,7 @@ To enable draining each node during a cluster upgrade, 1. Go to the cluster view in the Rancher UI. 1. Click **⋮ > Edit.** 1. In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** -1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/nodes/#aggressive-and-safe-draining-options) +1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md#aggressive-and-safe-draining-options) 1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. 1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. 1. Click **Save.** @@ -141,13 +141,13 @@ To enable draining each node during a cluster upgrade, _Available as of RKE v1.1.0_ -In [this section of the RKE documentation,]({{}}/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. 
+In [this section of the RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. ### Configuring the Upgrade Strategy in the cluster.yml More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. -For details, refer to [Configuring the Upgrade Strategy]({{}}/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. +For details, refer to [Configuring the Upgrade Strategy](https://rancher.com/docs/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. # Troubleshooting diff --git a/versioned_docs/version-2.0-2.4/admin-settings/k8s-metadata/k8s-metadata.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md similarity index 88% rename from versioned_docs/version-2.0-2.4/admin-settings/k8s-metadata/k8s-metadata.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md index 9d48da9cf06..810fa193368 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/k8s-metadata/k8s-metadata.md +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -12,7 +12,7 @@ The RKE metadata feature allows you to provision clusters with new versions of K > **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. 
-Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. +Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. This table below describes the CRDs that are affected by the periodic data sync. 
@@ -32,7 +32,7 @@ Administrators might configure the RKE metadata settings to do the following: ### Refresh Kubernetes Metadata -The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) +The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. @@ -95,6 +95,6 @@ After new Kubernetes versions are loaded into the Rancher setup, additional step 1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. 1. Download the OS specific image lists for Linux or Windows. 1. Download `rancher-images.txt`. -1. Prepare the private registry using the same steps during the [air gap install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. +1. Prepare the private registry using the same steps during the [air gap install](other-installation-methods/air-gapped-helm-cli-install/publish-images.md), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. **Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. 
If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. diff --git a/versioned_docs/version-2.0-2.4/overview/overview.md b/versioned_docs/version-2.0-2.4/getting-started/introduction/overview.md similarity index 63% rename from versioned_docs/version-2.0-2.4/overview/overview.md rename to versioned_docs/version-2.0-2.4/getting-started/introduction/overview.md index 417bcbcf30a..ec3fe481a80 100644 --- a/versioned_docs/version-2.0-2.4/overview/overview.md +++ b/versioned_docs/version-2.0-2.4/getting-started/introduction/overview.md @@ -22,7 +22,7 @@ Rancher provides an intuitive user interface for DevOps engineers to manage thei The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. -![Platform]({{}}/img/rancher/platform.png) +![Platform](/img/platform.png) # Features of the Rancher API Server @@ -30,21 +30,21 @@ The Rancher API server is built on top of an embedded Kubernetes API server and ### Authorization and Role-Based Access Control -- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. -- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/) policies. +- **User management:** The Rancher API server [manages user identities](../../pages-for-subheaders/about-authentication.md) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. 
+- **Authorization:** The Rancher API server manages [access control](../../pages-for-subheaders/manage-role-based-access-control-rbac.md) and [security](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) policies. ### Working with Kubernetes -- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes) -- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.0-v2.4/en/catalog/) that make it easy to repeatedly deploy applications. -- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.0-v2.4/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/) -- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. -- **Istio:** Our [integration with Istio]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. 
+- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) on existing nodes, or perform [Kubernetes upgrades.](../installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts](../../pages-for-subheaders/helm-charts-in-rancher.md) that make it easy to repeatedly deploy applications. +- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration](../../pages-for-subheaders/manage-projects.md) and for [managing applications within projects.](../../pages-for-subheaders/kubernetes-resources-setup.md) +- **Pipelines:** Setting up a [pipeline](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. +- **Istio:** Our [integration with Istio](../../pages-for-subheaders/istio.md) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. ### Working with Cloud Infrastructure -- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/nodes/) in all clusters.
-- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) in the cloud. +- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) in all clusters. +- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) and [persistent storage](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in the cloud. ### Cluster Visibility @@ -54,12 +54,12 @@ The Rancher API server is built on top of an embedded Kubernetes API server and # Editing Downstream Clusters with Rancher -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. 
-After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) +After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.](../../pages-for-subheaders/cluster-configuration.md) The following table summarizes the options and settings available for each cluster type: -import ClusterCapabilitiesTable from '/rancher/v2.0-v2.4/en/shared-files/_cluster-capabilities-table.md'; +import ClusterCapabilitiesTable from '@site/versioned_docs/version-2.0-2.4/shared-files/_cluster-capabilities-table.md'; diff --git a/versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md b/versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/cli/cli.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/cli.md similarity index 57% rename from versioned_docs/version-2.0-2.4/quick-start-guide/cli/cli.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/cli.md index f991ddeef28..445cdbc80aa 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/cli/cli.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/cli.md @@ -7,7 +7,7 @@ Interact with Rancher using command line interface (CLI) tools from your worksta ## Rancher CLI -Follow the steps in [rancher cli](../../cli). +Follow the steps in [rancher cli](../../pages-for-subheaders/cli-with-rancher.md). Ensure you can run `rancher kubectl get pods` successfully.
@@ -26,7 +26,7 @@ _**Available as of v2.4.6**_ _Requirements_ -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.0-v2.4/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher cli](../cli) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see error like: +If admins have [enforced TTL on kubeconfig tokens](../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher cli](cli.md) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see error like: `Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: @@ -37,15 +37,15 @@ This feature enables kubectl to authenticate with the Rancher server and get a n 4. SAML providers - Ping, Okta, ADFS, Keycloak, Shibboleth When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. -The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid till [it expires](../../api/api-tokens/#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../api/api-tokens/#deleting-tokens) +The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid till [it expires](../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../reference-guides/about-the-api/api-tokens.md#deleting-tokens) Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. 
_Note_ -As of CLI [v2.4.10](https://github.com/rancher/cli/releases/tag/v2.4.10), the kubeconfig token can be cached at a chosen path with `cache-dir` flag or env var `RANCHER_CACHE_DIR`. +As of CLI [v2.4.10](https://github.com/rancher/cli/releases/tag/v2.4.10), the kubeconfig token can be cached at a chosen path with `cache-dir` flag or env var `RANCHER_CACHE_DIR`. _**Current Known Issues**_ -1. If [authorized cluster endpoint]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) is enabled for RKE clusters to [authenticate directly with downstream cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) and Rancher server goes down, all kubectl calls will fail after the kubeconfig token expires. No new kubeconfig tokens can be generated if Rancher server isn't accessible. -2. If a kubeconfig token is deleted from Rancher [API tokens]({{}}/rancher/v2.0-v2.4/en/api/api-tokens/#deleting-tokens) page, and the token is still cached, cli won't ask you to login again until the token expires or is deleted. +1. If [authorized cluster endpoint](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) is enabled for RKE clusters to [authenticate directly with downstream cluster](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) and Rancher server goes down, all kubectl calls will fail after the kubeconfig token expires. No new kubeconfig tokens can be generated if Rancher server isn't accessible. +2. If a kubeconfig token is deleted from Rancher [API tokens](../../reference-guides/about-the-api/api-tokens.md#deleting-tokens) page, and the token is still cached, cli won't ask you to login again until the token expires or is deleted.
`kubectl` calls will result into an error like `error: You must be logged in to the server (the server has asked for the client to provide credentials`. Tokens can be deleted using `rancher token delete`. diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/aws.md similarity index 94% rename from versioned_docs/version-2.0-2.4/quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/aws.md index 92e07f38fc3..a7b829710d7 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/aws.md @@ -5,7 +5,7 @@ weight: 100 --- The following steps will quickly deploy a Rancher Server on AWS with a single node cluster attached. ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). ## Prerequisites @@ -59,7 +59,7 @@ Two Kubernetes clusters are deployed into your AWS account, one running Rancher ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). 
## Destroying the Environment diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/azure.md similarity index 95% rename from versioned_docs/version-2.0-2.4/quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/azure.md index aba878fc9ab..db6e0c89eb6 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/azure.md @@ -6,7 +6,7 @@ weight: 100 The following steps will quickly deploy a Rancher server on Azure in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). ## Prerequisites @@ -65,7 +65,7 @@ Two Kubernetes clusters are deployed into your Azure account, one running Ranche ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). 
## Destroying the Environment diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md similarity index 94% rename from versioned_docs/version-2.0-2.4/quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md index 06f557f5c88..9dc4553dccc 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md @@ -5,7 +5,7 @@ weight: 100 --- The following steps will quickly deploy a Rancher Server on DigitalOcean with a single node cluster attached. ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). ## Prerequisites @@ -59,7 +59,7 @@ Two Kubernetes clusters are deployed into your DigitalOcean account, one running ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). 
## Destroying the Environment diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md similarity index 95% rename from versioned_docs/version-2.0-2.4/quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md index 862baa347c6..2e8838ced54 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md @@ -5,7 +5,7 @@ weight: 100 --- The following steps will quickly deploy a Rancher server on GCP in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). ## Prerequisites @@ -60,7 +60,7 @@ Two Kubernetes clusters are deployed into your GCP account, one running Rancher ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). 
## Destroying the Environment diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md similarity index 90% rename from versioned_docs/version-2.0-2.4/quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md index 753006eb6b6..ad920576970 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md @@ -8,7 +8,7 @@ Howdy Partner! This tutorial walks you through: - Creation of your first cluster - Deployment of an application, Nginx ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). ## Quick Start Outline @@ -38,9 +38,9 @@ This Quick Start Guide is divided into different tasks for easier consumption. >**Note:** > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. > - > For a full list of port requirements, refer to [Docker Installation]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/). 
+ > For a full list of port requirements, refer to [Docker Installation](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md). - Provision the host according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). + Provision the host according to our [Requirements](../../../pages-for-subheaders/installation-requirements.md). ### 2. Install Rancher @@ -115,4 +115,4 @@ Congratulations! You have created your first cluster. #### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md similarity index 91% rename from versioned_docs/version-2.0-2.4/quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md index a996135f282..b6e1b1d2c8b 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md @@ -4,7 +4,7 @@ weight: 200 --- The following steps quickly deploy a Rancher Server with a single node cluster attached. ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). 
+>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). ## Prerequisites @@ -38,7 +38,7 @@ The following steps quickly deploy a Rancher Server with a single node cluster a ### What's Next? -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). ## Destroying the Environment diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/workload/quickstart-deploy-workload-nodeport/quickstart-deploy-workload-nodeport.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/nodeports.md similarity index 87% rename from versioned_docs/version-2.0-2.4/quick-start-guide/workload/quickstart-deploy-workload-nodeport/quickstart-deploy-workload-nodeport.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/nodeports.md index 55e1fd93a23..080f324f051 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/workload/quickstart-deploy-workload-nodeport/quickstart-deploy-workload-nodeport.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/nodeports.md @@ -33,15 +33,15 @@ For this workload, you'll be deploying the application Rancher Hello-World. 9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. - ![As a dropdown, NodePort (On every node selected)]({{}}/img/rancher/nodeport-dropdown.png) + ![As a dropdown, NodePort (On every node selected)](/img/nodeport-dropdown.png) 10. From the **On Listening Port** field, leave the **Random** value in place. 
- ![On Listening Port, Random selected]({{}}/img/rancher/listening-port-field.png) + ![On Listening Port, Random selected](/img/listening-port-field.png) 11. From the **Publish the container port** field, enter port `80`. - ![Publish the container port, 80 entered]({{}}/img/rancher/container-port-field.png) + ![Publish the container port, 80 entered](/img/container-port-field.png) 12. Leave the remaining options on their default setting. We'll tell you about them later. @@ -151,6 +151,6 @@ Congratulations! You have successfully deployed a workload exposed via a NodePor When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/workload/quickstart-deploy-workload-ingress/quickstart-deploy-workload-ingress.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md similarity index 85% rename from versioned_docs/version-2.0-2.4/quick-start-guide/workload/quickstart-deploy-workload-ingress/quickstart-deploy-workload-ingress.md rename to versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md index 
e47fa946ccb..e287a0d5947 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/workload/quickstart-deploy-workload-ingress/quickstart-deploy-workload-ingress.md +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md @@ -77,6 +77,6 @@ Congratulations! You have successfully deployed a workload exposed via an ingres When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides.md b/versioned_docs/version-2.0-2.4/how-to-guides.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/ad/ad.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md similarity index 96% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/ad/ad.md rename to 
versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md index c0361a86e77..687c1615853 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/ad/ad.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md @@ -7,11 +7,11 @@ aliases: If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. -Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap) integration. +Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication](../../../../../pages-for-subheaders/configure-openldap.md) integration. > **Note:** > -> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). 
## Prerequisites @@ -148,7 +148,7 @@ $ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountNam=jdoe`), returning the attributes for said user: -{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} +![](/img/ldapsearch-user.png) Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. @@ -181,7 +181,7 @@ $ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ This command will inform us on the attributes used for group objects: -{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} +![](/img/ldapsearch-group.png) Again, this allows us to determine the correct values to enter in the group schema configuration: @@ -196,4 +196,4 @@ In the same way, we can observe that the value in the **memberOf** attribute in ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/azure-ad/azure-ad.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md similarity index 90% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/azure-ad/azure-ad.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md index d4c9e8c1eb8..4463db7b0de 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/azure-ad/azure-ad.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md @@ -42,11 +42,11 @@ Before enabling Azure AD within Rancher, you must register Rancher with Azure. 1. Use search to open the **App registrations** service. - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) + ![Open App Registrations](/img/search-app-registrations.png) 1. Click **New registrations** and complete the **Create** form. - ![New App Registration]({{}}/img/rancher/new-app-registration.png) + ![New App Registration](/img/new-app-registration.png) 1. Enter a **Name** (something like `Rancher`). @@ -66,13 +66,13 @@ From the Azure portal, create a client secret. Rancher will use this key to auth 1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. - ![Open Rancher Registration]({{}}/img/rancher/open-rancher-app.png) + ![Open Rancher Registration](/img/open-rancher-app.png) 1. From the navigation pane on left, click **Certificates and Secrets**. 1. Click **New client secret**. 
- ![Create new client secret]({{< baseurl >}}/img/rancher/select-client-secret.png) + ![Create new client secret](/img/select-client-secret.png) 1. Enter a **Description** (something like `Rancher`). @@ -93,13 +93,13 @@ Next, set API permissions for Rancher within Azure. 1. From the navigation pane on left, select **API permissions**. - ![Open Required Permissions]({{}}/img/rancher/select-required-permissions.png) + ![Open Required Permissions](/img/select-required-permissions.png) 1. Click **Add a permission**. 1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: - ![Select API Permissions]({{< baseurl >}}/img/rancher/select-required-permissions-2.png) + ![Select API Permissions](/img/select-required-permissions-2.png)

@@ -124,7 +124,7 @@ To use Azure AD with Rancher you must whitelist Rancher with Azure. You can comp 1. From the **Setting** blade, select **Reply URLs**. - ![Azure: Enter Reply URL]({{}}/img/rancher/enter-azure-reply-url.png) + ![Azure: Enter Reply URL](/img/enter-azure-reply-url.png) 1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. @@ -144,7 +144,7 @@ As your final step in Azure, copy the data that you'll use to configure Rancher 1. Use search to open the **Azure Active Directory** service. - ![Open Azure Active Directory]({{}}/img/rancher/search-azure-ad.png) + ![Open Azure Active Directory](/img/search-azure-ad.png) 1. From the left navigation pane, open **Overview**. @@ -156,7 +156,7 @@ As your final step in Azure, copy the data that you'll use to configure Rancher 1. Use search to open **App registrations**. - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) + ![Open App Registrations](/img/search-app-registrations.png) 1. Find the entry you created for Rancher. @@ -166,7 +166,7 @@ As your final step in Azure, copy the data that you'll use to configure Rancher 1. From **App registrations**, click **Endpoints**. - ![Click Endpoints]({{}}/img/rancher/click-endpoints.png) + ![Click Endpoints](/img/click-endpoints.png) 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/freeipa/freeipa.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md similarity index 95% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/freeipa/freeipa.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md index b788ba9d06d..0e9f006970f 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/freeipa/freeipa.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md @@ -13,7 +13,7 @@ If your organization uses FreeIPA for user authentication, you can configure Ran > >- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. >- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. ->- Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +>- Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). 1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/github/github.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md similarity index 93% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/github/github.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md index a9667696ea5..6ba4fd5f04f 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/github/github.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md @@ -7,7 +7,7 @@ aliases: In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. ->**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +>**Prerequisites:** Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). 1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/google/google.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md similarity index 90% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/google/google.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md index 564b3920fc2..23a8381545c 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/google/google.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md @@ -7,7 +7,7 @@ If your organization uses G Suite for user authentication, you can configure Ran Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. -Within Rancher, only administrators or users with the **Manage Authentication** [global role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) can configure authentication. +Within Rancher, only administrators or users with the **Manage Authentication** [global role](../../manage-role-based-access-control-rbac/global-permissions.md) can configure authentication. # Prerequisites - You must have a [G Suite admin account](https://admin.google.com) configured. @@ -15,7 +15,7 @@ Within Rancher, only administrators or users with the **Manage Authentication** - You must have the Admin SDK API enabled for your G Suite domain. 
You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: -![Enable Admin APIs]({{}}/img/rancher/Google-Enable-APIs-Screen.png) +![Enable Admin APIs](/img/Google-Enable-APIs-Screen.png) # Setting up G Suite for OAuth with Rancher Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: @@ -28,7 +28,7 @@ Before you can set up Google OAuth in Rancher, you need to log in to your G Suit ### 1. Adding Rancher as an Authorized Domain 1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. 1. Select your project and click **OAuth consent screen.** -![OAuth Consent Screen]({{}}/img/rancher/Google-OAuth-consent-screen-tab.png) +![OAuth Consent Screen](/img/Google-OAuth-consent-screen-tab.png) 1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) 1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. @@ -36,7 +36,7 @@ Before you can set up Google OAuth in Rancher, you need to log in to your G Suit ### 2. Creating OAuth2 Credentials for the Rancher Server 1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) -![Credentials]({{}}/img/rancher/Google-Credentials-tab.png) +![Credentials](/img/Google-Credentials-tab.png) 1. On the **Create Credentials** dropdown, select **OAuth client ID.** 1. Click **Web application.** 1. Provide a name. 
@@ -63,11 +63,11 @@ This section describes how to: 1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. 1. Click on **Create Service Account.** 1. Enter a name and click **Create.** -![Service account creation Step 1]({{}}/img/rancher/Google-svc-acc-step1.png) +![Service account creation Step 1](/img/Google-svc-acc-step1.png) 1. Don't provide any roles on the **Service account permissions** page and click **Continue** -![Service account creation Step 2]({{}}/img/rancher/Google-svc-acc-step2.png) +![Service account creation Step 2](/img/Google-svc-acc-step2.png) 1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. -![Service account creation Step 3]({{}}/img/rancher/Google-svc-acc-step3-key-creation.png) +![Service account creation Step 3](/img/Google-svc-acc-step3-key-creation.png) **Result:** Your service account is created. @@ -79,7 +79,7 @@ Using the Unique ID of the service account key, register it as an Oauth Client u 1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** - ![Service account Unique ID]({{}}/img/rancher/Google-Select-UniqueID-column.png) + ![Service account Unique ID](/img/Google-Select-UniqueID-column.png) 1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) 1. Add the Unique ID obtained in the previous step in the **Client Name** field. 1. 
In the **One or More API Scopes** field, add the following scopes: @@ -91,7 +91,7 @@ Using the Unique ID of the service account key, register it as an Oauth Client u **Result:** The service account is registered as an OAuth client in your G Suite account. # Configuring Google OAuth in Rancher -1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. +1. Sign into Rancher using a local user assigned the [administrator](../../manage-role-based-access-control-rbac/global-permissions.md) role. This user is also called the local principal. 1. From the **Global** view, click **Security > Authentication** from the main menu. 1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/keycloak/keycloak.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md similarity index 95% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/keycloak/keycloak.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md index be49ae9ad1d..abc22cb502c 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/keycloak/keycloak.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md @@ -25,13 +25,13 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati >1: Optionally, you can enable either one or both of these settings. >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. - {{< img "/img/rancher/keycloak/keycloak-saml-client-configuration.png" "">}} + ![](/img/keycloak/keycloak-saml-client-configuration.png) - In the new SAML client, create Mappers to expose the users fields - Add all "Builtin Protocol Mappers" - {{< img "/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png" "">}} + ![](/img/keycloak/keycloak-saml-client-builtin-mappers.png) - Create a new "Group list" mapper to map the member attribute to a user's groups - {{< img "/img/rancher/keycloak/keycloak-saml-client-group-mapper.png" "">}} + ![](/img/keycloak/keycloak-saml-client-group-mapper.png) - Export a `metadata.xml` file from your Keycloak client: From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. 
@@ -93,7 +93,7 @@ If your organization uses Keycloak Identity Provider (IdP) for user authenticati ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. ### You are not redirected to Keycloak diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/okta/okta.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md similarity index 100% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/okta/okta.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/ping-federate/ping-federate.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md similarity index 100% rename from 
versioned_docs/version-2.0-2.4/admin-settings/authentication/ping-federate/ping-federate.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/local/local.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md similarity index 100% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/local/local.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/user-groups/user-groups.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md similarity index 95% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/user-groups/user-groups.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md index d22d705befa..3c01a373a50 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/user-groups/user-groups.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md @@ -5,11 +5,11 @@ weight: 1 Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. 
When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. -Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/). +Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control](../../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md). ## Managing Members -When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. +When adding a user or group to a resource, you can search for users or groups by beginning to type their name. 
The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local](create-local-users.md) user account, you will not be able to search for GitHub users or groups. All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/microsoft-adfs-setup.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md similarity index 75% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/microsoft-adfs-setup.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md index 0c2979fcb61..13c946a6666 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/microsoft-adfs-setup.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md @@ -9,57 +9,57 @@ Before configuring Rancher to support AD FS users, you must add Rancher as a [re 1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - {{< img "/img/rancher/adfs/adfs-overview.png" "">}} + ![](/img/adfs/adfs-overview.png) 1. 
Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} + ![](/img/adfs/adfs-add-rpt-2.png) 1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} + ![](/img/adfs/adfs-add-rpt-3.png) 1. Select **AD FS profile** as the configuration profile for your relying party trust. - {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} + ![](/img/adfs/adfs-add-rpt-4.png) 1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. - {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} + ![](/img/adfs/adfs-add-rpt-5.png) 1. Select **Enable support for the SAML 2.0 WebSSO protocol** and enter `https:///v1-saml/adfs/saml/acs` for the service URL. - {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} + ![](/img/adfs/adfs-add-rpt-6.png) 1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} + ![](/img/adfs/adfs-add-rpt-7.png) 1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. - {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}} + ![](/img/adfs/adfs-add-rpt-8.png) 1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. - {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}} + ![](/img/adfs/adfs-add-rpt-9.png) 1. After reviewing your settings, select **Next** to add the relying party trust. 
- {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}} + ![](/img/adfs/adfs-add-rpt-10.png) 1. Select **Open the Edit Claim Rules...** and click **Close**. - {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}} + ![](/img/adfs/adfs-add-rpt-11.png) 1. On the **Issuance Transform Rules** tab, click **Add Rule...**. - {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}} + ![](/img/adfs/adfs-edit-cr.png) 1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. - {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}} + ![](/img/adfs/adfs-add-tcr-1.png) 1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: @@ -70,7 +70,7 @@ Before configuring Rancher to support AD FS users, you must add Rancher as a [re | Token-Groups - Qualified by Long Domain Name | Group | | SAM-Account-Name | Name |
- {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} + ![](/img/adfs/adfs-add-tcr-2.png) 1. Download the `federationmetadata.xml` from your AD server at: ``` @@ -79,4 +79,4 @@ https:///federationmetadata/2007-06/federationmetadata.xml **Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. -### [Next: Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) +### [Next: Configuring Rancher for Microsoft AD FS](configure-rancher-for-ms-adfs.md) diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/rancher-adfs-setup.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md similarity index 93% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/rancher-adfs-setup.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md index be585ae2f00..11deb4d2de0 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/rancher-adfs-setup.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md @@ -4,7 +4,7 @@ weight: 1205 --- _Available as of v2.0.7_ -After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. 
+After you complete [Configuring Microsoft AD FS for Rancher](configure-ms-adfs-for-rancher.md), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. >**Important Notes For Configuring Your AD FS Server:** > diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/shibboleth/about/about.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md similarity index 95% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/shibboleth/about/about.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md index 6a057b2104a..e1bca2e6d03 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/shibboleth/about/about.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md @@ -30,5 +30,5 @@ When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shi Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. 
-![Adding OpenLDAP Group Permissions to Rancher Resources]({{}}/img/rancher/shibboleth-with-openldap-groups.svg) +![Adding OpenLDAP Group Permissions to Rancher Resources](/img/shibboleth-with-openldap-groups.svg) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/admin-settings/drivers/cluster-drivers/cluster-drivers.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md similarity index 66% rename from versioned_docs/version-2.0-2.4/admin-settings/drivers/cluster-drivers/cluster-drivers.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md index 1684b167303..e0a4f920328 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/drivers/cluster-drivers/cluster-drivers.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md @@ -5,7 +5,7 @@ weight: 1 _Available as of v2.2.0_ -Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. +Cluster drivers are used to create clusters in a [hosted Kubernetes provider](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md), such as Google GKE. 
The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. @@ -13,8 +13,8 @@ If there are specific cluster drivers that you do not want to show your users, y >**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. +>- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Cluster Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. 
## Activating/Deactivating Cluster Drivers diff --git a/versioned_docs/version-2.0-2.4/admin-settings/drivers/node-drivers/node-drivers.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md similarity index 86% rename from versioned_docs/version-2.0-2.4/admin-settings/drivers/node-drivers/node-drivers.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md index b2b7368f310..c47ddbc51b0 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/drivers/node-drivers/node-drivers.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md @@ -14,8 +14,8 @@ If there are specific node drivers that you don't want to show to your users, yo >**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. +>- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Node Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. 
## Activating/Deactivating Node Drivers diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/template-access-and-sharing/template-access-and-sharing.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md similarity index 96% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/template-access-and-sharing/template-access-and-sharing.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md index 0ab942a12a2..aa435277b72 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/template-access-and-sharing/template-access-and-sharing.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md @@ -14,7 +14,7 @@ When you share a template, each user can have one of two access levels: If you create a template, you automatically become an owner of that template. -If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising) +If you want to delegate responsibility for updating the template, you can share ownership of the template. 
For details on how owners can modify templates, refer to the [documentation about revising templates.](manage-rke1-templates.md) There are several ways to share templates: diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/applying-templates/applying-templates.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md similarity index 78% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/applying-templates/applying-templates.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md index 2b6263637b7..6d59d6af2a9 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/applying-templates/applying-templates.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md @@ -3,7 +3,7 @@ title: Applying Templates weight: 50 --- -You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) +You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.](access-or-share-templates.md) RKE templates can be applied to new clusters. @@ -19,7 +19,7 @@ This section covers the following topics: ### Creating a Cluster from an RKE Template -To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters) using an RKE template, use these steps: +To add a cluster [hosted by an infrastructure provider](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using an RKE template, use these steps: 1. From the **Global** view, go to the **Clusters** tab. 1. 
Click **Add Cluster** and choose the infrastructure provider. @@ -33,7 +33,7 @@ To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2 When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. -- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) +- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.](../../../../pages-for-subheaders/cluster-configuration.md) - If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. @@ -48,7 +48,7 @@ _Available as of v2.3.3_ This section describes how to create an RKE template from an existing cluster. -RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) +RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. 
This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.](manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) To convert an existing cluster to use an RKE template, @@ -60,4 +60,4 @@ To convert an existing cluster to use an RKE template, - A new RKE template is created. - The cluster is converted to use the new template. -- New clusters can be [created from the new template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file +- New clusters can be [created from the new template.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/creator-permissions/creator-permissions.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md similarity index 91% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/creator-permissions/creator-permissions.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md index 8823bb1de1c..75dbd23828c 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/creator-permissions/creator-permissions.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md @@ -5,7 +5,7 @@ weight: 10 Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. 
-For more information on administrator permissions, refer to the [documentation on global permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/). +For more information on administrator permissions, refer to the [documentation on global permissions](../manage-role-based-access-control-rbac/global-permissions.md). # Giving Users Permission to Create Templates @@ -13,7 +13,7 @@ Templates can only be created by users who have the global permission **Create R Administrators have the global permission to create templates, and only administrators can give that permission to other users. -For information on allowing users to modify existing templates, refer to [Sharing Templates.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) +For information on allowing users to modify existing templates, refer to [Sharing Templates.](access-or-share-templates.md) Administrators can give users permission to create RKE templates in two ways: diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/enforcement/enforcement.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md similarity index 67% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/enforcement/enforcement.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md index 7c949d48da2..3d6fb987667 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/enforcement/enforcement.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md @@ -11,13 +11,13 @@ By default, any standard user in Rancher can create clusters. 
But when RKE templ - All standard users must use an RKE template to create a new cluster. - Standard users cannot create a cluster without using a template. -Users can only create new templates if the administrator [gives them permission.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/#allowing-a-user-to-create-templates) +Users can only create new templates if the administrator [gives them permission.](creator-permissions.md#allowing-a-user-to-create-templates) -After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) +After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision](apply-templates.md#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. 
For details on how template revisions work, refer to the [documentation on revising templates.](manage-rke1-templates.md#updating-a-template) # Requiring New Clusters to Use an RKE Template -You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) will use the Kubernetes and/or Rancher settings that are vetted by administrators. +You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user](../manage-role-based-access-control-rbac/global-permissions.md) will use the Kubernetes and/or Rancher settings that are vetted by administrators. To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/example-scenarios/example-scenarios.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md similarity index 74% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/example-scenarios/example-scenarios.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md index e40f654f74a..850fe6ce2f6 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/example-scenarios/example-scenarios.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md @@ -22,7 +22,7 @@ Let's say there is an organization in which the administrators decide that all n **Results:** - All Rancher users in the organization have access to the template. 
-- All new clusters created by [standard users]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. +- All new clusters created by [standard users](../manage-role-based-access-control-rbac/global-permissions.md) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. - All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. @@ -31,7 +31,7 @@ In this way, the administrators enforce the Kubernetes version across the organi Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. -1. First, an administrator turns on [RKE template enforcement.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) in Rancher will need to use an RKE template when they create a cluster. +1. 
First, an administrator turns on [RKE template enforcement.](enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user](../manage-role-based-access-control-rbac/global-permissions.md) in Rancher will need to use an RKE template when they create a cluster. 1. The administrator then creates two templates: - One template for basic users, with almost every option specified except for access keys @@ -60,12 +60,12 @@ Let's say Alice is a Rancher administrator. She owns an RKE template that reflec Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. -To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) +To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.](access-or-share-templates.md#sharing-ownership-of-templates) The result is that as a template owner, Bob is in charge of version control for that template. 
Bob can now do all of the following: -- [Revise the template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) when the best practices change -- [Disable outdated revisions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#disabling-a-template-revision) of the template so that no new clusters can be created with it -- [Delete the whole template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#deleting-a-template) if the organization wants to go in a different direction -- [Set a certain revision as default]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. -- [Share the template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. \ No newline at end of file +- [Revise the template](manage-rke1-templates.md#updating-a-template) when the best practices change +- [Disable outdated revisions](manage-rke1-templates.md#disabling-a-template-revision) of the template so that no new clusters can be created with it +- [Delete the whole template](manage-rke1-templates.md#deleting-a-template) if the organization wants to go in a different direction +- [Set a certain revision as default](manage-rke1-templates.md#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. +- [Share the template](access-or-share-templates.md) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/rke-templates-and-hardware/rke-templates-and-hardware.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md similarity index 90% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/rke-templates-and-hardware/rke-templates-and-hardware.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md index 71c982ca223..38524924ff6 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/rke-templates-and-hardware/rke-templates-and-hardware.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md @@ -11,7 +11,7 @@ If you want to standardize the hardware in your clusters, use RKE templates conj ### Node Templates -[Node templates]({{}}/rancher/v2.0-v2.4/en/user-settings/node-templates) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. +[Node templates](../../../../reference-guides/user-settings/manage-node-templates.md) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. 
### Terraform @@ -55,7 +55,7 @@ When you need to make changes to your infrastructure, instead of manually updati This section describes one way that you can make security and compliance-related config files standard in your clusters. -When you create a [CIS benchmark compliant cluster,]({{}}/rancher/v2.0-v2.4/en/security/) you have an encryption config file and an audit log config file. +When you create a [CIS benchmark compliant cluster,](../../../../pages-for-subheaders/rancher-security.md) you have an encryption config file and an audit log config file. Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/creating-and-revising/creating-and-revising.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md similarity index 89% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/creating-and-revising/creating-and-revising.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md index 94f7022a7a3..6b394eeeb97 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/creating-and-revising/creating-and-revising.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md @@ -28,19 +28,19 @@ This section covers the following topics: ### Prerequisites -You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an 
administrator.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions) +You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.](creator-permissions.md) -You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) +You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.](access-or-share-templates.md#sharing-ownership-of-templates) ### Creating a Template 1. From the **Global** view, click **Tools > RKE Templates.** 1. Click **Add Template.** 1. Provide a name for the template. An auto-generated name is already provided for the template's first version, which is created along with this template. -1. Optional: Share the template with other users or groups by [adding them as members.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. +1. Optional: Share the template with other users or groups by [adding them as members.](access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. 1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. -**Result:** An RKE template with one revision is configured.
You can use this RKE template revision later when you [provision a Rancher-launched cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. +**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. ### Updating a Template @@ -131,7 +131,7 @@ To permanently delete a revision, ### Upgrading a Cluster to Use a New Template Revision -> This section assumes that you already have a cluster that [has an RKE template applied.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates) +> This section assumes that you already have a cluster that [has an RKE template applied.](apply-templates.md) > This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. To upgrade a cluster to use a new template revision, @@ -147,7 +147,7 @@ To upgrade a cluster to use a new template revision, You can save an existing cluster's settings as an RKE template. -This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] +This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. 
The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.](manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) To convert an existing cluster to use an RKE template, @@ -159,4 +159,4 @@ To convert an existing cluster to use an RKE template, - A new RKE template is created. - The cluster is converted to use the new template. -- New clusters can be [created from the new template and revision.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file +- New clusters can be [created from the new template and revision.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/overrides/overrides.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md similarity index 75% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/overrides/overrides.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md index 3542d45b348..4fa8a4dbca0 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/overrides/overrides.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md @@ -5,7 +5,7 @@ When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting.
This switch marks those settings as **Allow User Override.** -After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. +After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision](manage-rke1-templates.md) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/pod-security-policies/pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md similarity index 96% rename from versioned_docs/version-2.0-2.4/admin-settings/pod-security-policies/pod-security-policies.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md index fedcf15d179..2b8cf9fe435 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/pod-security-policies/pod-security-policies.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md @@ -57,9 +57,9 @@ Using Rancher, you can create a Pod Security Policy using our GUI rather than cr ### Requirements -Rancher can only assign PSPs for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) +Rancher can only assign PSPs for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) -You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) +You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.](../../../pages-for-subheaders/cluster-configuration.md) It is a best practice to set PSP at the cluster level. 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/config-private-registry/config-private-registry.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md similarity index 85% rename from versioned_docs/version-2.0-2.4/admin-settings/config-private-registry/config-private-registry.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md index 09779408bb8..0e5375d0ca2 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/config-private-registry/config-private-registry.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md @@ -10,7 +10,7 @@ There are two main ways to set up private registries in Rancher: by setting up t This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. -For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node) or [air gapped Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability) instructions. +For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) or [air gapped Kubernetes installation](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) instructions. If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster.
Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. @@ -20,15 +20,15 @@ If your private registry requires credentials, it cannot be used as the default 1. Go into the **Settings** view. - {{< img "/img/rancher/airgap/settings.png" "Settings" >}} + ![Settings](/img/airgap/settings.png) 1. Look for the setting called `system-default-registry` and choose **Edit**. - {{< img "/img/rancher/airgap/edit-system-default-registry.png" "Edit" >}} + ![Edit](/img/airgap/edit-system-default-registry.png) 1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. - {{< img "/img/rancher/airgap/enter-system-default-registry.png" "Save" >}} + ![Save](/img/airgap/enter-system-default-registry.png) **Result:** Rancher will use your private registry to pull system images.
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rbac/cluster-project-roles/cluster-project-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md similarity index 95% rename from versioned_docs/version-2.0-2.4/admin-settings/rbac/cluster-project-roles/cluster-project-roles.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md index d2d1c927927..c6336068498 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rbac/cluster-project-roles/cluster-project-roles.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md @@ -53,7 +53,7 @@ For details on how each cluster role can access Kubernetes resources, you can go ### Giving a Custom Cluster Role to a Cluster Member -After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/) cluster owners and admins can then assign those roles to cluster members. +After an administrator [sets up a custom cluster role,](custom-roles.md) cluster owners and admins can then assign those roles to cluster members. To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. 
@@ -144,7 +144,7 @@ By default, when a standard user creates a new cluster or project, they are auto There are two methods for changing default cluster/project roles: -- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. +- **Assign Custom Roles**: Create a [custom role](custom-roles.md) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. @@ -152,7 +152,7 @@ There are two methods for changing default cluster/project roles: >**Note:** > ->- Although you can [lock]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. +>- Although you can [lock](locked-roles.md) a default role, the system still assigns the role to users who create a cluster/project. >- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. 
### Configuring Default Roles for Cluster and Project Creators diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rbac/default-custom-roles/default-custom-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md similarity index 93% rename from versioned_docs/version-2.0-2.4/admin-settings/rbac/default-custom-roles/default-custom-roles.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md index 07e666d0873..71cc0ba754d 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rbac/default-custom-roles/default-custom-roles.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md @@ -24,8 +24,8 @@ This section covers the following topics: To complete the tasks on this page, one of the following permissions are required: - - [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/). - - [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. + - [Administrator Global Permissions](global-permissions.md). + - [Custom Global Permissions](global-permissions.md#custom-global-permissions) with the [Manage Roles](global-permissions.md) role assigned. ## Creating A Custom Role for a Cluster or Project @@ -72,7 +72,7 @@ The steps to add custom roles differ depending on the version of Rancher. 1. **Name** the role. -1. Choose whether to set the role to a status of [locked]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/). +1. 
Choose whether to set the role to a status of [locked](locked-roles.md). > **Note:** Locked roles cannot be assigned to users. @@ -163,8 +163,8 @@ If a user is removed from the external authentication provider group, they would > **Prerequisites:** You can only assign a global role to a group if: > -> * You have set up an [external authentication provider]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/) +> * You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +> * The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) > * You have already set up at least one user group with the authentication provider To assign a custom global role to a group, follow these steps: diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rbac/global-permissions/global-permissions.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md similarity index 94% rename from versioned_docs/version-2.0-2.4/admin-settings/rbac/global-permissions/global-permissions.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md index 125c2cbe699..b9dd90f3d71 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rbac/global-permissions/global-permissions.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md @@ -51,7 +51,7 @@ As of Rancher v2.4.0, you can [assign a 
role to everyone in the group at the sam Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. -When a user from an [external authentication source]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. +When a user from an [external authentication source](../../../../pages-for-subheaders/about-authentication.md) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. 
@@ -144,8 +144,8 @@ If a user is removed from the external authentication provider group, they would > **Prerequisites:** You can only assign a global role to a group if: > -> * You have set up an [external authentication provider]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/) +> * You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +> * The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) > * You have already set up at least one user group with the authentication provider To assign a custom global role to a group, follow these steps: diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rbac/locked-roles/locked-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md similarity index 93% rename from versioned_docs/version-2.0-2.4/admin-settings/rbac/locked-roles/locked-roles.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md index 7c787167e9f..0929bca9c05 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rbac/locked-roles/locked-roles.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md @@ -27,7 +27,7 @@ If you want to prevent a role from being assigned to users, you can set it to a You can lock roles in two contexts: -- When you're [adding a custom 
role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/). +- When you're [adding a custom role](custom-roles.md). - When you editing an existing role (see below). 1. From the **Global** view, select **Security** > **Roles**. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md 
b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md new file mode 100644 index 00000000000..fa42a3bae89 --- 
/dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/enable-istio-with-psp.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md similarity index 93% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/enable-istio-with-psp.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md index d3a8130ac25..e7fb1c63f90 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/enable-istio-with-psp.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md @@ -48,6 +48,6 @@ The Istio CNI plugin removes the need for each application pod to have a privile ### 3. Install Istio -Follow the [primary instructions]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/), adding a custom answer: `istio_cni.enabled: true`. +Follow the [primary instructions](enable-istio-in-cluster.md), adding a custom answer: `istio_cni.enabled: true`. 
After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-in-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md similarity index 56% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-in-cluster.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md index c7a06c44a50..463cf794df6 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-in-cluster.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -10,23 +10,23 @@ aliases: This cluster uses the default Nginx controller to allow traffic into the cluster. -A Rancher [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. +A Rancher [administrator](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. # Prerequisites -This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.0-v2.4/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning) on which you will install Istio. 
+This guide assumes you have already [installed Rancher,](../../../pages-for-subheaders/installation-and-upgrade.md) and you have already [provisioned a separate Kubernetes cluster](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) on which you will install Istio. -The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/) +The nodes in your cluster must meet the [CPU and memory requirements.](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) -> If the cluster has a Pod Security Policy enabled there are [additional prerequisites steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/) +> If the cluster has a Pod Security Policy enabled there are [additional prerequisites steps](enable-istio-in-cluster-with-psp.md) # Enable Istio in the Cluster 1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. 1. Click **Tools > Istio.** -1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. +1. Optional: Configure member access and [resource limits](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. 1. Click **Enable**. 1. Click **Save**. @@ -36,4 +36,4 @@ The Istio application, `cluster-istio`, is added as an application to the cluste When Istio is enabled in the cluster, the label for Istio sidecar auto injection,`istio-injection=enabled`, will be automatically added to each new namespace in this cluster. 
This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. -### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) +### [Next: Enable Istio in a Namespace](enable-istio-in-namespace.md) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-namespace/enable-istio-in-namespace.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md similarity index 96% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-namespace/enable-istio-in-namespace.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md index 97a725a79db..ddc35756ceb 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/enable-istio-in-namespace/enable-istio-in-namespace.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -50,4 +50,4 @@ To add the annotation to a workload, > **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. 
-### [Next: Select the Nodes ]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file +### [Next: Select the Nodes ](node-selectors.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/view-traffic/view-traffic.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/view-traffic/view-traffic.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/node-selectors/node-selectors.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md similarity index 91% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/node-selectors/node-selectors.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md index cae0c5936f5..de315a961cc 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/node-selectors/node-selectors.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md @@ -8,7 +8,7 @@ aliases: - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/ --- -> **Prerequisite:** Your cluster needs a worker node that can designated for Istio. The worker node should meet the [resource requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources) +> **Prerequisite:** Your cluster needs a worker node that can designated for Istio. 
The worker node should meet the [resource requirements.](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) This section describes how use node selectors to configure Istio components to be deployed on a designated node. @@ -40,4 +40,4 @@ For larger deployments, it is recommended to schedule each component of Istio on **Result:** The Istio components will be deployed on the Istio node. -### [Next: Add Deployments and Services]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads) \ No newline at end of file +### [Next: Add Deployments and Services](use-istio-sidecar.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/gateway/gateway.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md similarity index 93% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/gateway/gateway.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md index 7bd777e2352..1526f5a4d71 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/gateway/gateway.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -20,7 +20,7 @@ You can route traffic into the service mesh with a load balancer or just Istio's For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.](/img/istio-ingress.svg) # Enable the Istio Gateway @@ -29,7 +29,7 @@ The ingress gateway is a Kubernetes service that will be deployed 
in your cluste 1. Go to the cluster where you want to allow outside traffic into Istio. 1. Click **Tools > Istio.** 1. Expand the **Ingress Gateway** section. -1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/) +1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.](../../new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md) 1. Optionally, configure the ports, service types, node selectors and tolerations, and resource requests and limits for this service. The default resource requests for CPU and memory are the minimum recommended resources. 1. Click **Save.** @@ -132,4 +132,4 @@ In the gateway resource, the selector refers to Istio's default ingress controll 1. Within `istio-system`, there is a workload named `istio-ingressgateway`. 1. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. 
-### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management) +### [Next: Set up Istio's Components for Traffic Management](set-up-traffic-management.md) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/set-up-traffic-management/set-up-traffic-management.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md similarity index 96% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/set-up-traffic-management/set-up-traffic-management.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md index 003ec7c7107..be98965cf9b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/set-up-traffic-management/set-up-traffic-management.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -63,4 +63,4 @@ spec: ``` **Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. 
-### [Next: Generate and View Traffic]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic) +### [Next: Generate and View Traffic](generate-and-view-traffic.md) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/deploy-workloads/deploy-workloads.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md similarity index 98% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/deploy-workloads/deploy-workloads.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md index fa88cd2852c..68d1a60df1e 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/setup/deploy-workloads/deploy-workloads.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -324,4 +324,4 @@ spec: --- ``` -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway) +### [Next: Set up the Istio Gateway](set-up-istio-gateway.md) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/cluster-members/cluster-members.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md similarity index 64% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/cluster-members/cluster-members.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md index 4f4ebe85591..5824ed96cfd 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/cluster-members/cluster-members.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md @@ -9,7 +9,7 @@ aliases: If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a 
cluster, assign the user a cluster membership. ->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. +>**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members](k8s-in-rancher/projects-and-namespaces/project-members/) instead. There are two contexts where you can add cluster members: @@ -33,23 +33,23 @@ Cluster administrators can edit the membership for a cluster, controlling which If external authentication is configured: - - Rancher returns users from your [external authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) source as you type. + - Rancher returns users from your [external authentication](../../../../pages-for-subheaders/about-authentication.md) source as you type. >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/). + >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5](../../authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md). - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). + >**Note:** If you are logged in as a local user, external users do not display in your search results. 
For more information, see [External Authentication Configuration and Principal Users](../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). 4. Assign the user or group **Cluster** roles. - [What are Cluster Roles?]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) + [What are Cluster Roles?](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles). + > - To add roles to the list, [Add a Custom Role](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + > - To remove roles from the list, [Lock/Unlock Roles](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). **Result:** The chosen users are added to the cluster. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/ace/ace.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md similarity index 71% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/ace/ace.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md index 866126c4590..9da68ae0ead 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/ace/ace.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -3,7 +3,7 @@ title: How the Authorized Cluster Endpoint Works weight: 2015 --- -This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) +This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. 
It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) ### About the kubeconfig File @@ -15,12 +15,12 @@ After you download the kubeconfig file, you will be able to use the kubeconfig f _Available as of v2.4.6_ -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.0-v2.4/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](../cli) to be present in your PATH. +If admins have [enforced TTL on kubeconfig tokens](../../../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](cluster-admin/cluster-access/cli) to be present in your PATH. ### Two Authentication Methods for RKE Clusters -If the cluster is not an [RKE cluster,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. +If the cluster is not an [RKE cluster,](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: @@ -29,13 +29,13 @@ For RKE clusters, the kubeconfig file allows you to be authenticated in two ways This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. 
-To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](../kubectl/#authenticating-directly-with-a-downstream-cluster) +To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) -These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. +These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page](../../../../pages-for-subheaders/rancher-manager-architecture.md#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. ### About the kube-api-auth Authentication Webhook -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) which is only available for [RKE clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. 
+The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) which is only available for [RKE clusters.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/kubectl/kubectl.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md similarity index 89% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/kubectl/kubectl.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md index cb74848ec14..6c815abefd4 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/kubectl/kubectl.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -54,9 +54,9 @@ Rancher will discover and show resources created by `kubectl`. 
However, these re # Authenticating Directly with a Downstream Cluster -This section intended to help you set up an alternative method to access an [RKE cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters) +This section is intended to help you set up an alternative method to access an [RKE cluster.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) -This method is only available for RKE clusters that have the [authorized cluster endpoint]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](../ace) +This method is only available for RKE clusters that have the [authorized cluster endpoint](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](authorized-cluster-endpoint.md) We recommend that as a best practice, you should set up this method to access your RKE cluster, so that just in case you can’t connect to Rancher, you can still access the cluster. @@ -75,7 +75,7 @@ In this example, when you use `kubectl` with the first context, `my-cluster`, yo
-We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/pod-security-policy/pod-security-policy.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md similarity index 58% rename from versioned_docs/version-2.0-2.4/cluster-admin/pod-security-policy/pod-security-policy.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md index ef2ec5ccc1f..874cb531b78 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/pod-security-policy/pod-security-policy.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md @@ -3,9 +3,9 @@ title: Adding a Pod Security Policy weight: 80 --- -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) -When your cluster is running pods with security-sensitive 
configurations, assign it a [pod security policy]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. +When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy](../authentication-permissions-and-global-configuration/create-pod-security-policies.md), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. @@ -15,11 +15,11 @@ You can assign a pod security policy when you provision a cluster. However, if y 3. From **Pod Security Policy Support**, select **Enabled**. - >**Note:** This option is only available for clusters [provisioned by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). + >**Note:** This option is only available for clusters [provisioned by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). 4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. - Rancher ships with [policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. 
+ Rancher ships with [policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) as well. 5. Click **Save**. diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/options/pod-security-policies/pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md similarity index 91% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/options/pod-security-policies/pod-security-policies.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md index aeb89d2378c..a3a955ed025 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/options/pod-security-policies/pod-security-policies.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md @@ -10,7 +10,7 @@ _Pod Security Policies_ are objects that control security-sensitive aspects of p When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. >**Prerequisite:** ->Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). +>Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. 
For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). >**Note:** >For security purposes, we recommend assigning a PSP as you create your clusters. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/backing-up-etcd/backing-up-etcd.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-admin/backing-up-etcd/backing-up-etcd.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md index 4338cabd32f..ec5122ad08b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/backing-up-etcd/backing-up-etcd.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md @@ -8,7 +8,7 @@ import TabItem from '@theme/TabItem'; _Available as of v2.2.0_ -In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) can be easily performed. +In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. @@ -140,7 +140,7 @@ On restore, the following process is used: Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. -By default, [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). 
To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. +By default, [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. @@ -172,7 +172,7 @@ Rancher supports two different backup targets: ### Local Backup Target -By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. +By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. 
### S3 Backup Target @@ -222,4 +222,4 @@ This option is not available directly in the UI, and is only available through t # Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/). +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restoring-etcd.md). 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md similarity index 97% rename from versioned_docs/version-2.0-2.4/cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md index b3be3289bd3..a8e880a3c47 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md @@ -27,10 +27,10 @@ When cleaning nodes provisioned using Rancher, the following components are dele | All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | | All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | -[1]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/ +[1]: cluster-provisioning/rke-clusters/node-pools/ +[2]: cluster-provisioning/rke-clusters/custom-nodes/ +[3]: cluster-provisioning/hosted-kubernetes-clusters/ +[4]: cluster-provisioning/imported-clusters/ ## Removing a Node from a Cluster by Rancher UI diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cloning-clusters/cloning-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md similarity index 81% rename from versioned_docs/version-2.0-2.4/cluster-admin/cloning-clusters/cloning-clusters.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md index 11c9c443ef5..e787c544fd2 100644 --- 
a/versioned_docs/version-2.0-2.4/cluster-admin/cloning-clusters/cloning-clusters.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md @@ -11,16 +11,16 @@ Duplication of imported clusters is not supported. | Cluster Type | Cloneable? | |----------------------------------|---------------| -| [Nodes Hosted by Infrastructure Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | -| [Hosted Kubernetes Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) | ✓ | -| [Custom Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) | ✓ | -| [Imported Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) | | +| [Nodes Hosted by Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) | ✓ | +| [Hosted Kubernetes Providers](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) | ✓ | +| [Custom Cluster](../../../pages-for-subheaders/use-existing-nodes.md) | ✓ | +| [Imported Cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) | | > **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. ## Prerequisites -Download and install [Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli). Remember to [create an API bearer token]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys) if necessary. +Download and install [Rancher CLI](../../../pages-for-subheaders/cli-with-rancher.md). 
Remember to [create an API bearer token](../../../reference-guides/user-settings/api-keys.md) if necessary. ## 1. Export Cluster Config @@ -51,7 +51,7 @@ Begin by using Rancher CLI to export the configuration for the cluster that you Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. -> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) +> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher-v2-3-0) 1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/glusterfs-volumes/glusterfs-volumes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md similarity index 93% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/glusterfs-volumes/glusterfs-volumes.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md index dbb81dcbac5..fbafbeffe4a 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/glusterfs-volumes/glusterfs-volumes.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md @@ -3,7 +3,7 @@ title: GlusterFS Volumes weight: 5000 --- -> This section only applies to [RKE clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) +> This section only applies to [RKE clusters.](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The logging of the `kubelet` will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. 
There are two requirements before you can change the cluster configuration: diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/how-storage-works/how-storage-works.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md similarity index 98% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/how-storage-works/how-storage-works.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md index 86c858dc413..db37ec5d8cf 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/how-storage-works/how-storage-works.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md @@ -16,7 +16,7 @@ To use an existing PV, your application will need to use a PVC that is bound to For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. 
-![Setting Up New and Existing Persistent Storage]({{}}/img/rancher/rancher-storage.svg) +![Setting Up New and Existing Persistent Storage](/img/rancher-storage.svg) For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/provisioning-new-storage/provisioning-new-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/provisioning-new-storage/provisioning-new-storage.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md index d819c28d2f9..9d9073b1a18 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/provisioning-new-storage/provisioning-new-storage.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md @@ -5,7 +5,7 @@ weight: 2 This section describes how to provision new persistent storage for workloads in Rancher. -This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) +This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. 
@@ -19,9 +19,9 @@ To provision new storage for your workloads, follow these steps: ### Prerequisites -- To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. +- To set up persistent storage, the `Manage Volumes` [role](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. - If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) +- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](cluster-provisioning/rke-clusters/options/cloud-providers/) - Make sure your storage provisioner is available to be enabled. The following storage provisioners are enabled by default: @@ -38,7 +38,7 @@ Local | `local` Network File System | `nfs` hostPath | `host-path` -To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers/) +To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.](installation/options/feature-flags/enable-not-default-storage-drivers/) ### 1. Add a storage class and configure it to use your storage @@ -92,7 +92,7 @@ You can mount PVCs during the deployment of a workload, or following workload cr To attach the PVC to a new workload, -1. Create a workload as you would in [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). +1. 
Create a workload as you would in [Deploying Workloads](../../../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). 1. For **Workload Type**, select **Stateful set of 1 pod**. 1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** 1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/iscsi-volumes/iscsi-volumes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md similarity index 94% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/iscsi-volumes/iscsi-volumes.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md index 14c5fc50c42..2c82e065479 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/iscsi-volumes/iscsi-volumes.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md @@ -3,7 +3,7 @@ title: iSCSI Volumes weight: 6000 --- -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. 
+In [Rancher Launched Kubernetes clusters](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/attaching-existing-storage/attaching-existing-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md similarity index 92% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/attaching-existing-storage/attaching-existing-storage.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md index 407782a7fd5..c21fb887ce9 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/attaching-existing-storage/attaching-existing-storage.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md @@ -5,7 +5,7 @@ weight: 1 This section describes how to set up existing persistent storage 
for workloads in Rancher. -> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) +> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) To set up storage, follow these steps: @@ -16,14 +16,14 @@ To set up storage, follow these steps: ### Prerequisites -- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) +- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) - If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. ### 1. Set up persistent storage Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. -The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../examples/vsphere) [NFS,](../examples/nfs) or Amazon's [EBS.](../examples/ebs) +The steps to set up a persistent storage device will differ based on your infrastructure. 
We provide examples of how to set up storage using [vSphere,](../provisioning-storage-examples/vsphere-storage.md) [NFS,](../provisioning-storage-examples/nfs-storage.md) or Amazon's [EBS.](../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/nfs/nfs.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md similarity index 97% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/nfs/nfs.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md index 579fbcd30a9..fa962766337 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/nfs/nfs.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md @@ -9,7 +9,7 @@ Before you can use the NFS storage volume plug-in with Rancher deployments, you >**Note:** > ->- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/). +>- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage](../../../../../pages-for-subheaders/create-kubernetes-persistent-storage.md). 
> >- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/ebs/ebs.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md similarity index 79% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/ebs/ebs.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md index 5bb23411b74..b5a5788c3c5 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/ebs/ebs.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md @@ -13,4 +13,4 @@ This section describes how to set up Amazon's Elastic Block Store in EC2. **Result:** Persistent storage has been created. 
-For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/attaching-existing-storage/) \ No newline at end of file +For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.](../manage-persistent-storage/set-up-existing-storage.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/vsphere/vsphere.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md similarity index 82% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/vsphere/vsphere.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md index c52508aec77..84af3dd43cc 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/vsphere/vsphere.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md @@ -5,9 +5,9 @@ aliases: - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ --- -To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). +To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. 
This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim](k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). -In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) +In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../../../new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) - [Prerequisites](#prerequisites) - [Creating a StorageClass](#creating-a-storageclass) @@ -17,7 +17,7 @@ In order to dynamically provision storage in vSphere, the vSphere provider must ### Prerequisites -In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{< baseurl>}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/). +In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), the [vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md). ### Creating a StorageClass @@ -30,21 +30,21 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 3. Enter a **Name** for the class. 4. Under **Provisioner**, select **VMWare vSphere Volume**. - {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} + ![](/img/vsphere-storage-class.png) 5. 
Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. 5. Click **Save**. ### Creating a Workload with a vSphere Volume -1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). +1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads](../../../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). 2. For **Workload Type**, select **Stateful set of 1 pod**. 3. Expand the **Volumes** section and click **Add Volume**. 4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. 5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. 6. Enter the required **Capacity** for the volume. Then click **Define**. - {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} + ![](/img/workload-add-volume.png) 7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. 8. Click **Launch** to create the workload. @@ -62,7 +62,7 @@ In order to provision vSphere volumes in a cluster created with the [Rancher Kub 9. Once the replacement pod is running, click **Execute Shell**. 10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. 
- ![workload-persistent-data]({{}}/img/rancher/workload-persistent-data.png) + ![workload-persistent-data](/img/workload-persistent-data.png) ### Why to Use StatefulSets Instead of Deployments diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-autoscaler/amazon/amazon.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md similarity index 97% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-autoscaler/amazon/amazon.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md index cd6961ac739..06cd2b0ac0d 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-autoscaler/amazon/amazon.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md @@ -149,7 +149,7 @@ On AWS EC2, we should create a few objects to configure our system. We've define ``` * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` - * Security group: `K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) + * Security group: `K8sMasterSg` More info at [RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) * Tags: `kubernetes.io/cluster/: owned` * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster @@ -206,7 +206,7 @@ On AWS EC2, we should create a few objects to configure our system. 
We've define ``` * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` - * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) + * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) * Tags: * `kubernetes.io/cluster/: owned` * `k8s.io/cluster-autoscaler/: true` @@ -237,13 +237,13 @@ On AWS EC2, we should create a few objects to configure our system. We've define sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} ``` -More info is at [RKE clusters on AWS]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) +More info is at [RKE clusters on AWS](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) ### 3. Deploy Nodes Once we've configured AWS, let's create VMs to bootstrap our cluster: -* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production/) +* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. 
More info is at [the recommendations for production-ready clusters.](../../../../pages-for-subheaders/checklist-for-production-ready-clusters.md) * IAM role: `K8sMasterRole` * Security group: `K8sMasterSg` * Tags: diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/nodes/nodes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md similarity index 82% rename from versioned_docs/version-2.0-2.4/cluster-admin/nodes/nodes.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md index 10fcf3f9be3..a4951bf68a7 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/nodes/nodes.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md @@ -6,9 +6,9 @@ weight: 2030 import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) to provision the cluster, there are different node options available. +After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available. -> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/#editing-clusters-with-yaml). +> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters](../../../pages-for-subheaders/cluster-configuration.md#editing-clusters-with-yaml). 
This section covers the following topics: @@ -43,28 +43,28 @@ The following table lists which node options are available for each type of clus | [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key for in order to SSH into the node. | | [Node Scaling](#scaling-nodes) | ✓ | | | | Scale the number of nodes in the node pool up or down. | -[1]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/ +[1]: cluster-provisioning/rke-clusters/node-pools/ +[2]: cluster-provisioning/rke-clusters/custom-nodes/ +[3]: cluster-provisioning/hosted-kubernetes-clusters/ +[4]: cluster-provisioning/imported-clusters/ ### Nodes Hosted by an Infrastructure Provider -Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) +Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) -Clusters provisioned using [one of the node pool options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) can be scaled up or down if the node pool is edited. +Clusters provisioned using [one of the node pool options](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) can be scaled up or down if the node pool is edited. 
-A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. +A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. -Rancher uses [node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. +Rancher uses [node templates](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. ### Nodes Provisioned by Hosted Kubernetes Providers -Options for managing nodes [hosted by a Kubernetes provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. +Options for managing nodes [hosted by a Kubernetes provider](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. 
### Imported Nodes -Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. +Although you can deploy workloads to an [imported cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. # Managing and Editing Individual Nodes @@ -79,23 +79,23 @@ To manage individual nodes, browse to the cluster that you want to manage and th # Viewing a Node in the Rancher API -Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.0-v2.4/en/api/). +Select this option to view the node's [API endpoints](../../../pages-for-subheaders/about-the-api.md). # Deleting a Node Use **Delete** to remove defective nodes from the cloud provider. -When you the delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) +When you delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) >**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. 
# Scaling Nodes -For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) by using the scale controls. This option isn't available for other cluster types. +For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) by using the scale controls. This option isn't available for other cluster types. # SSH into a Node Hosted by an Infrastructure Provider -For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. +For [nodes hosted by an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. 1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/projects-and-namespaces/projects-and-namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md similarity index 83% rename from versioned_docs/version-2.0-2.4/cluster-admin/projects-and-namespaces/projects-and-namespaces.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md index 545de58bf84..75a1d71b2fd 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/projects-and-namespaces/projects-and-namespaces.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md @@ -39,18 +39,18 @@ You can assign resources at the project level so that each namespace in the proj You can assign the following resources directly to namespaces: -- [Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/) +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Persistent Volume Claims](k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) 
+- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. -For more information on creating and moving namespaces, see [Namespaces]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces/). +For more information on creating and moving namespaces, see [Namespaces](../manage-projects/manage-namespaces.md). ### Role-based access control issues with namespaces and kubectl @@ -58,7 +58,7 @@ Because projects are a concept introduced by Rancher, kubectl does not have the This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. -If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces/) to ensure that you will have permission to access the namespace. +If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](../manage-projects/manage-namespaces.md) to ensure that you will have permission to access the namespace. If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. 
@@ -75,8 +75,8 @@ In the base version of Kubernetes, features like role-based access rights or clu You can use projects to perform actions such as: -- Assign users to a group of namespaces (i.e., [project membership]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/project-members)). -- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/). +- Assign users to a group of namespaces (i.e., [project membership](../manage-projects/add-users-to-projects.md)). +- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). - Assign resources to the project. - Assign Pod Security Policies. @@ -124,7 +124,7 @@ Standard users are only authorized for project access in two situations: # Pod Security Policies -Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/pod-security-policies) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. +Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level](../manage-projects/manage-pod-security-policies.md) in addition to the [cluster level.](add-a-pod-security-policy.md) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. # Creating Projects @@ -145,7 +145,7 @@ This section describes how to create a new project with a name and with optional ### 2.
Optional: Select a Pod Security Policy -This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). +This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). Assigning a PSP to a project will: @@ -165,26 +165,26 @@ By default, your user is added as the project `Owner`. > >- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. > ->- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). +>- Choose `Custom` to create a custom role on the fly: [Custom Project Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#custom-project-roles). To add members: 1. Click **Add Member**. 1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. -1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) +1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) ### 4. 
Optional: Add Resource Quotas _Available as of v2.1.0_ -Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas). +Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas](../../../pages-for-subheaders/manage-project-resource-quotas.md). To add a resource quota, 1. Click **Add Quota**. -1. Select a Resource Type. For more information, see [Resource Quotas.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/). +1. Select a Resource Type. For more information, see [Resource Quotas.](../../../pages-for-subheaders/manage-project-resource-quotas.md). 1. Enter values for the **Project Limit** and the **Namespace Default Limit**. -1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit]({{}}/rancher/v2.0-v2.4/en/project-admin/resource-quotas/) Note: This option is available as of v2.2.0. +1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden at the level of an individual namespace or container. For more information, see [Container Default Resource Limit](../../../pages-for-subheaders/manage-project-resource-quotas.md) Note: This option is available as of v2.2.0. 1. Click **Create**. **Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view.
@@ -198,7 +198,7 @@ To add a resource quota, To switch between clusters and projects, use the **Global** drop-down available in the main menu. -![Global Menu]({{}}/img/rancher/global-menu.png) +![Global Menu](/img/global-menu.png) Alternatively, you can switch between projects and clusters using the main menu. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/restoring-etcd/restoring-etcd.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md similarity index 67% rename from versioned_docs/version-2.0-2.4/cluster-admin/restoring-etcd/restoring-etcd.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md index 43794b7e711..7aeb7f26a97 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/restoring-etcd/restoring-etcd.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md @@ -8,9 +8,9 @@ import TabItem from '@theme/TabItem'; _Available as of v2.2.0_ -etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. +etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. 
-Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots), but [one-time snapshots]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). +Rancher recommends enabling the [ability to set up recurring snapshots of etcd](backing-up-etcd.md#configuring-recurring-snapshots), but [one-time snapshots](backing-up-etcd.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). As of Rancher v2.4.0, clusters can also be restored to a prior Kubernetes version and cluster configuration. @@ -44,9 +44,9 @@ Snapshots are composed of the cluster data in etcd, the Kubernetes version, and - **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. - **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. -When rolling back to a prior Kubernetes version, the [upgrade strategy options]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. 
+When rolling back to a prior Kubernetes version, the [upgrade strategy options](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. -> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) +> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.](backing-up-etcd.md#configuring-recurring-snapshots) 1. In the **Global** view, navigate to the cluster that you want to restore from a snapshots. @@ -65,8 +65,8 @@ When rolling back to a prior Kubernetes version, the [upgrade strategy options]( > **Prerequisites:** > -> - Make sure your etcd nodes are healthy. If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters in which Rancher used node pools to provision [nodes in an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. -> - To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) +> - Make sure your etcd nodes are healthy. If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. 
For clusters in which Rancher used node pools to provision [nodes in an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), new etcd nodes will automatically be created. For [custom clusters](../../../pages-for-subheaders/use-existing-nodes.md), please ensure that you add new etcd nodes to the cluster. +> - To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.](backing-up-etcd.md#configuring-recurring-snapshots) 1. In the **Global** view, navigate to the cluster that you want to restore from a snapshot. @@ -109,8 +109,8 @@ If the group of etcd nodes loses quorum, the Kubernetes cluster will report a fa 5. Run the revised command. -6. After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) and you want to reuse an old node, you are required to [clean up the nodes]({{}}/rancher/v2.0-v2.4/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. +6. After the single node is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster](../../../pages-for-subheaders/use-existing-nodes.md) and you want to reuse an old node, you are required to [clean up the nodes](../../../faq/cleaning-cluster-nodes.md) before attempting to add them back into a cluster. # Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features.
Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/). +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restoring-etcd.md). diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/certificate-rotation/certificate-rotation.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-admin/certificate-rotation/certificate-rotation.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md diff --git a/versioned_docs/version-2.0-2.4/project-admin/project-members/project-members.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md similarity index 80% rename from versioned_docs/version-2.0-2.4/project-admin/project-members/project-members.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md index a0a4a0922d7..924c39d151c 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/project-members/project-members.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md @@ -10,11 +10,11 @@ If you want to provide a user with access and permissions to _specific_ projects You can add members to a project as it is created, or add them to an existing 
project. ->**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-members/) instead. +>**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members](cluster-provisioning/cluster-members/) instead. ### Adding Members to a New Project -You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) +You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.](../manage-clusters/projects-and-namespaces.md) ### Adding Members to an Existing Project @@ -36,7 +36,7 @@ Following project creation, you can add users as project members so that they ca 1. Assign the user or group **Project** roles. - [What are Project Roles?]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) + [What are Project Roles?](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) >**Notes:** > @@ -46,8 +46,8 @@ Following project creation, you can add users as project members so that they ca > >- For `Custom` roles, you can modify the list of individual roles available for assignment. > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/). + > - To add roles to the list, [Add a Custom Role](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md).
+ > - To remove roles from the list, [Lock/Unlock Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). **Result:** The chosen users are added to the project. diff --git a/versioned_docs/version-2.0-2.4/project-admin/pipelines/pipelines.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md similarity index 87% rename from versioned_docs/version-2.0-2.4/project-admin/pipelines/pipelines.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md index 252f94f1320..34b2d14a611 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/pipelines/pipelines.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md @@ -17,4 +17,4 @@ After configuring Rancher and GitHub, you can deploy containers running Jenkins - Run unit tests. - Run regression tests. -For details, refer to the [pipelines]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines) section. \ No newline at end of file +For details, refer to the [pipelines](k8s-in-rancher/pipelines) section. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/project-admin/namespaces/namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md similarity index 66% rename from versioned_docs/version-2.0-2.4/project-admin/namespaces/namespaces.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md index db8845713e2..a8c6b494af3 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/namespaces/namespaces.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md @@ -9,25 +9,25 @@ Although you assign resources at the project level so that each namespace in the Resources that you can assign directly to namespaces include: -- [Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/) +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Persistent Volume Claims](k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) +- 
[ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. -> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces) to ensure that you will have permission to access the namespace. +> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace. ### Creating Namespaces Create a new namespace to isolate apps and resources in a project. ->**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/), [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps), etc.) you can create a namespace on the fly. 
+>**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads](../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md), [certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md), [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md), etc.) you can create a namespace on the fly. 1. From the **Global** view, open the project where you want to create a namespace. @@ -35,7 +35,7 @@ Create a new namespace to isolate apps and resources in a project. 1. From the main menu, select **Namespace**. The click **Add Namespace**. -1. **Optional:** If your project has [Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). +1. **Optional:** If your project has [Resource Quotas](../../../pages-for-subheaders/manage-project-resource-quotas.md) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). 1. Enter a **Name** and then click **Create**. @@ -54,7 +54,7 @@ Cluster admins and members may occasionally need to move a namespace to another >**Notes:** > >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. - >- You cannot move a namespace into a project that already has a [resource quota]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured. + >- You cannot move a namespace into a project that already has a [resource quota](../../../pages-for-subheaders/manage-project-resource-quotas.md) configured. >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. 1. Choose a new project for the new namespace and then click **Move**.
Alternatively, you can remove the namespace from all projects by selecting **None**. @@ -65,4 +65,4 @@ Cluster admins and members may occasionally need to move a namespace to another You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. -For more information, see how to [edit namespace resource quotas]({{}}/rancher/v2.0-v2.4/en/project-admin//resource-quotas/override-namespace-default/). \ No newline at end of file +For more information, see how to [edit namespace resource quotas](manage-project-resource-quotas/override-default-limit-in-namespaces.md). \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/project-admin/pod-security-policies/pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md similarity index 82% rename from versioned_docs/version-2.0-2.4/project-admin/pod-security-policies/pod-security-policies.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md index 5d57af5e419..1eb528cbbe4 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/pod-security-policies/pod-security-policies.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -3,14 +3,14 @@ title: Pod Security Policies weight: 5600 --- -> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). +> These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. ### Prerequisites -- Create a Pod Security Policy within Rancher.
Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). -- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy). +- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). +- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster](../manage-clusters/add-a-pod-security-policy.md). 
### Applying a Pod Security Policy diff --git a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/quotas-for-projects/quotas-for-projects.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md similarity index 95% rename from versioned_docs/version-2.0-2.4/project-admin/resource-quotas/quotas-for-projects/quotas-for-projects.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md index 63a18ba0f49..ff0fa52c3f1 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/quotas-for-projects/quotas-for-projects.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -10,7 +10,7 @@ In a standard Kubernetes deployment, resource quotas are applied to individual n In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{}}/img/rancher/kubernetes-resource-quota.svg) +![Native Kubernetes Resource Quota Implementation](/img/kubernetes-resource-quota.svg) Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. 
If you want to change the quota for a specific namespace, you can override it. @@ -28,7 +28,7 @@ The resource quota includes two limits, which you set while creating or editing In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{}}/img/rancher/rancher-resource-quota.png) +![Rancher Resource Quota Implementation](/img/rancher-resource-quota.png) Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. 
diff --git a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/override-namespace-default/override-namespace-default.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md similarity index 66% rename from versioned_docs/version-2.0-2.4/project-admin/resource-quotas/override-namespace-default/override-namespace-default.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md index c65eba128f4..41cc2304e29 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/override-namespace-default/override-namespace-default.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -5,16 +5,16 @@ weight: 2 Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. -In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) for `Namespace 3` so that the namespace can access more resources. +In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. 
Therefore, the administrator [raises the namespace limits](k8s-in-rancher/projects-and-namespaces/) for `Namespace 3` so that the namespace can access more resources. Namespace Default Limit Override -![Namespace Default Limit Override]({{}}/img/rancher/rancher-resource-quota-override.svg) +![Namespace Default Limit Override](/img/rancher-resource-quota-override.svg) -How to: [Editing Namespace Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) +How to: [Editing Namespace Resource Quotas](k8s-in-rancher/projects-and-namespaces/) ### Editing Namespace Resource Quotas -If there is a [resource quota]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. +If there is a [resource quota](k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. 1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. @@ -24,7 +24,7 @@ If there is a [resource quota]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher 1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - For more information about each **Resource Type**, see [Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/). + For more information about each **Resource Type**, see [Resource Quotas](k8s-in-rancher/projects-and-namespaces/resource-quotas/). 
>**Note:** > diff --git a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/quota-type-reference/quota-type-reference.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md similarity index 100% rename from versioned_docs/version-2.0-2.4/project-admin/resource-quotas/quota-type-reference/quota-type-reference.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md diff --git a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/override-container-default/override-container-default.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md similarity index 96% rename from versioned_docs/version-2.0-2.4/project-admin/resource-quotas/override-container-default/override-container-default.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md index 6b4d3c71013..021065930c8 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/override-container-default/override-container-default.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -13,7 +13,7 @@ To avoid setting these limits on each and every container during workload creati _Available as of v2.2.0_ -Edit [container default resource limit]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) when: +Edit [container default resource limit](k8s-in-rancher/projects-and-namespaces/resource-quotas/) when: - You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. 
- You want to edit the default container resource limit. diff --git a/versioned_docs/version-2.0-2.4/backups/backup/docker-backups/docker-backups.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md similarity index 95% rename from versioned_docs/version-2.0-2.4/backups/backup/docker-backups/docker-backups.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md index d1ae9b2816b..6e01afb9c06 100644 --- a/versioned_docs/version-2.0-2.4/backups/backup/docker-backups/docker-backups.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md @@ -39,7 +39,7 @@ Write down or copy this information before starting the [procedure below](#creat Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) +![Placeholder Reference](/img/placeholder-ref.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | --------------------------------------------------------- | @@ -87,4 +87,4 @@ This procedure creates a backup that you can restore if Rancher encounters a dis docker start ``` -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.0-v2.4/en/backups/restorations/single-node-restoration) if you need to restore backup data. +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs](backups/restorations/single-node-restoration) if you need to restore backup data. 
diff --git a/versioned_docs/version-2.0-2.4/backups/backup/k3s-backups/k3s-backups.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md similarity index 96% rename from versioned_docs/version-2.0-2.4/backups/backup/k3s-backups/k3s-backups.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md index 65fd599b0b0..63856627d6c 100644 --- a/versioned_docs/version-2.0-2.4/backups/backup/k3s-backups/k3s-backups.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md @@ -23,7 +23,7 @@ We recommend configuring the database to take recurring snapshots. One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral.
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) +![Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server](/img/k3s-server-storage.svg) ### Creating Snapshots and Restoring Databases from Snapshots diff --git a/versioned_docs/version-2.0-2.4/backups/backup/rke-backups/rke-backups.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md similarity index 99% rename from versioned_docs/version-2.0-2.4/backups/backup/rke-backups/rke-backups.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md index 9d414038c88..700f0ccc06e 100644 --- a/versioned_docs/version-2.0-2.4/backups/backup/rke-backups/rke-backups.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md @@ -22,7 +22,7 @@ This section describes how to create backups of your high-availability Rancher i In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails.
Cluster Data within an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) +![Architecture of an RKE Kubernetes cluster running the Rancher management server](/img/rke-server-storage.svg) # Requirements diff --git a/versioned_docs/version-2.0-2.4/backups/restore/docker-restores/docker-restores.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md similarity index 85% rename from versioned_docs/version-2.0-2.4/backups/restore/docker-restores/docker-restores.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md index 6d557aa5111..ec80eca402b 100644 --- a/versioned_docs/version-2.0-2.4/backups/restore/docker-restores/docker-restores.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md @@ -27,7 +27,7 @@ Cross reference the image and reference table below to learn how to obtain this Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) +![Placeholder Reference](/img/placeholder-ref.png) | Placeholder | Example | Description | | -------------------------- | -------------------------- | --------------------------------------------------------- | @@ -41,7 +41,7 @@ You can obtain `` and `` by loggi ## Restoring Backups -Using a [backup]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. +Using a [backup](backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. 1. Using a remote Terminal connection, log into the node running your Rancher Server. 
@@ -50,9 +50,9 @@ Using a [backup]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node- ``` docker stop ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs](backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. + If you followed the naming convention we suggested in [Creating Backups—Docker Installs](backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. 1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. 
diff --git a/versioned_docs/version-2.0-2.4/backups/restore/k3s-restore/k3s-restore.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md similarity index 100% rename from versioned_docs/version-2.0-2.4/backups/restore/k3s-restore/k3s-restore.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md diff --git a/versioned_docs/version-2.0-2.4/backups/restore/rke-restore/rke-restore.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md similarity index 91% rename from versioned_docs/version-2.0-2.4/backups/restore/rke-restore/rke-restore.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md index c6de8e35c69..dd3c28b461a 100644 --- a/versioned_docs/version-2.0-2.4/backups/restore/rke-restore/rke-restore.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -15,7 +15,7 @@ aliases: This procedure describes how to use RKE to restore a snapshot of the Rancher Kubernetes cluster. This will restore the Kubernetes configuration and the Rancher database and state. -> **Note:** This document covers clusters set up with RKE >= v0.2.x, for older RKE versions refer to the [RKE Documentation]({{}}/rke/latest/en/etcd-snapshots/restoring-from-backup). +> **Note:** This document covers clusters set up with RKE >= v0.2.x, for older RKE versions refer to the [RKE Documentation](https://rancher.com/docs/rke/latest/en/etcd-snapshots/restoring-from-backup). ## Restore Outline @@ -30,11 +30,11 @@ This will restore the Kubernetes configuration and the Rancher database and stat ### 1. 
Preparation -It is advised that you run the restore from your local host or a jump box/bastion where your cluster yaml, rke statefile, and kubeconfig are stored. You will need [RKE]({{}}/rke/latest/en/installation/) and [kubectl]({{}}/rancher/v2.0-v2.4/en/faq/kubectl/) CLI utilities installed locally. +It is advised that you run the restore from your local host or a jump box/bastion where your cluster yaml, rke statefile, and kubeconfig are stored. You will need [RKE](https://rancher.com/docs/rke/latest/en/installation/) and [kubectl](../../../faq/install-and-configure-kubectl.md) CLI utilities installed locally. Prepare by creating 3 new nodes to be the target for the restored Rancher instance. We recommend that you start with fresh nodes and a clean state. For clarification on the requirements, review the [Installation Requirements](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation/requirements/). -Alternatively you can re-use the existing nodes after clearing Kubernetes and Rancher configurations. This will destroy the data on these nodes. See [Node Cleanup]({{}}/rancher/v2.0-v2.4/en/faq/cleaning-cluster-nodes/) for the procedure. +Alternatively you can re-use the existing nodes after clearing Kubernetes and Rancher configurations. This will destroy the data on these nodes. See [Node Cleanup](faq/cleaning-cluster-nodes/) for the procedure. You must restore each of your etcd nodes to the same snapshot. Copy the snapshot you're using from one of your nodes to the others before running the `etcd snapshot-restore` command. @@ -105,12 +105,12 @@ S3 specific options are only available for RKE v0.2.0+. 
| `--bucket-name` value | Specify s3 bucket name | *| | `--folder` value | Specify s3 folder in the bucket name _Available as of v2.3.0_ | *| | `--region` value | Specify the s3 bucket location (optional) | *| -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK](https://rancher.com/docs/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check](https://rancher.com/docs/rke/latest/en/config-options/#supported-docker-versions) | #### Testing the Cluster -Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl]({{}}/rancher/v2.0-v2.4/en/faq/kubectl/#configuration) for details. +Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl](../../../faq/install-and-configure-kubectl.md#configuration) for details. 
#### Check Kubernetes Pods diff --git a/versioned_docs/version-2.0-2.4/backups/restore/rke-restore/v2.0-v2.1/v2.0-v2.1.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md similarity index 97% rename from versioned_docs/version-2.0-2.4/backups/restore/rke-restore/v2.0-v2.1/v2.0-v2.1.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md index bfc20ae2b48..490094574bb 100644 --- a/versioned_docs/version-2.0-2.4/backups/restore/rke-restore/v2.0-v2.1/v2.0-v2.1.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md @@ -28,7 +28,7 @@ Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.o 2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** -3. Rollback Rancher following the [normal instructions]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/). +3. Rollback Rancher following the [normal instructions](upgrades/rollbacks/). 4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. 
diff --git a/versioned_docs/version-2.0-2.4/deploy-across-clusters/deploy-across-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/deploy-apps-across-clusters.md similarity index 82% rename from versioned_docs/version-2.0-2.4/deploy-across-clusters/deploy-across-clusters.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/deploy-apps-across-clusters.md index f02706abbda..558e681dc5b 100644 --- a/versioned_docs/version-2.0-2.4/deploy-across-clusters/deploy-across-clusters.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/deploy-apps-across-clusters.md @@ -11,7 +11,7 @@ Typically, most applications are deployed on a single Kubernetes cluster, but th Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. -After creating a multi-cluster application, you can program a [Global DNS entry]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/) to make it easier to access the application. +After creating a multi-cluster application, you can program a [Global DNS entry](helm-charts-in-rancher/globaldns.md) to make it easier to access the application. 
- [Prerequisites](#prerequisites) - [Launching a multi-cluster app](#launching-a-multi-cluster-app) @@ -32,8 +32,8 @@ After creating a multi-cluster application, you can program a [Global DNS entry] To create a multi-cluster app in Rancher, you must have at least one of the following permissions: -- A [project-member role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the clusters(s) that include the target project(s) +- A [project-member role](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads +- A [cluster owner role](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) for the cluster(s) that include the target project(s) # Launching a Multi-Cluster App @@ -75,15 +75,15 @@ In the **Upgrades** section, select the upgrade strategy to use, when you decide ### Roles -In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{}}/rancher/v2.0-v2.4/en/catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that is required by the app. +In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications](catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that are required by the app.
For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. -- **Project** - This is the equivalent of a [project member]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. +- **Project** - This is the equivalent of a [project member](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. 
While the user might not be explicitly granted the _project member_ role, if the user is an [administrator](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), a [cluster owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or a [project owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles), then the user is considered to have the appropriate level of permissions. -- **Cluster** - This is the equivalent of a [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. +- **Cluster** - This is the equivalent of a [cluster owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. 
While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), then the user is considered to have the appropriate level of permissions. When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. @@ -101,7 +101,7 @@ If the Helm chart that you are deploying contains a `questions.yml` file, Ranche ### Key Value Pairs for Native Helm Charts -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.0-v2.4/en/catalog/custom/), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository](catalog/custom/), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. 
### Members diff --git a/versioned_docs/version-2.0-2.4/helm-charts/adding-catalogs/adding-catalogs.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs.md similarity index 61% rename from versioned_docs/version-2.0-2.4/helm-charts/adding-catalogs/adding-catalogs.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs.md index 139fcc73361..fe55d097156 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/adding-catalogs/adding-catalogs.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs.md @@ -24,7 +24,7 @@ Custom catalogs can be added into Rancher at a global scope, cluster scope, or p Adding a catalog is as simple as adding a catalog name, a URL and a branch name. -**Prerequisite:** An [admin]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) of Rancher has the ability to add or remove catalogs globally in Rancher. +**Prerequisite:** An [admin](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) of Rancher has the ability to add or remove catalogs globally in Rancher. ### Add Custom Git Repositories The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will use the `master` branch by default. Whenever you add a catalog to Rancher, it will be available immediately. @@ -42,7 +42,7 @@ _Available as of v2.2.0_ Private catalog repositories can be added using credentials like Username and Password. You may also want to use the OAuth token if your Git or Helm repository server supports that. 
-For more information on private Git/Helm catalogs, refer to the [custom catalog configuration reference.]({{}}/rancher/v2.0-v2.4/en/catalog/catalog-config) +For more information on private Git/Helm catalogs, refer to the [custom catalog configuration reference.](catalog/catalog-config) 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. 2. Click **Add Catalog**. @@ -52,18 +52,18 @@ For more information on private Git/Helm catalogs, refer to the [custom catalog # Adding Global Catalogs ->**Prerequisites:** In order to manage the [built-in catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: +>**Prerequisites:** In order to manage the [built-in catalogs](catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) role assigned. 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. 
In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. 2. Click **Add Catalog**. 3. Complete the form. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) +helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) 4. Click **Create**. - **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or [applications in any project]({{}}/rancher/v2.0-v2.4/en/catalog/launching-apps/) from this catalog. + **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps](catalog/multi-cluster-apps/) or [applications in any project](catalog/launching-apps/) from this catalog. # Adding Cluster Level Catalogs @@ -71,18 +71,18 @@ _Available as of v2.2.0_ >**Prerequisites:** In order to manage cluster scoped catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Custom Cluster Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Cluster Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-role-reference) role assigned. 
+>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Cluster Owner Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) +>- [Custom Cluster Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) with the [Manage Cluster Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-role-reference) role assigned. 1. From the **Global** view, navigate to your cluster that you want to start adding custom catalogs. 2. Choose the **Tools > Catalogs** in the navigation bar. 2. Click **Add Catalog**. 3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Cluster** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) +helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) 5. Click **Create**. -**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) from this catalog. +**Result**: Your custom cluster catalog is added to Rancher. 
Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster](catalog/apps/) from this catalog. # Adding Project Level Catalogs @@ -90,20 +90,20 @@ _Available as of v2.2.0_ >**Prerequisites:** In order to manage project scoped catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Project Owner Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) ->- [Custom Project Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Project Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) role assigned. +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Cluster Owner Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) +>- [Project Owner Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) +>- [Custom Project Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) with the [Manage Project Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) role assigned. 1. 
From the **Global** view, navigate to your project that you want to start adding custom catalogs. 2. Choose the **Tools > Catalogs** in the navigation bar. 2. Click **Add Catalog**. 3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Project** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) +helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) 5. Click **Create**. -**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) from this catalog. +**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project](catalog/apps/) from this catalog. # Custom Catalog Configuration Reference -Refer to [this page]({{}}/rancher/v2.0-v2.4/en/catalog/catalog-config) more information on configuring custom catalogs. \ No newline at end of file +Refer to [this page](catalog/catalog-config) for more information on configuring custom catalogs.
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/helm-charts/built-in/built-in.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/built-in.md similarity index 71% rename from versioned_docs/version-2.0-2.4/helm-charts/built-in/built-in.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/built-in.md index 597d2d6cf1d..9688a359ad8 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/built-in/built-in.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/built-in.md @@ -13,14 +13,14 @@ Within Rancher, there are default catalogs packaged as part of Rancher. These ca >**Prerequisites:** In order to manage the built-in catalogs or manage global catalogs, you need _one_ of the following permissions: > ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions-reference) role assigned. +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions-reference) role assigned. 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. 2. 
Toggle the default catalogs that you want to be enabled or disabled: - - **Library:** The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. This catalog features Rancher Charts, which include some [notable advantages]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps/#rancher-charts) over native Helm charts. + - **Library:** The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. This catalog features Rancher Charts, which include some [notable advantages](helm-charts/legacy-catalogs/creating-apps/#rancher-charts) over native Helm charts. - **Helm Stable:** This catalog, which is maintained by the Kubernetes community, includes native [Helm charts](https://helm.sh/docs/chart_template_guide/). This catalog features the largest pool of apps. - **Helm Incubator:** Similar in user experience to Helm Stable, but this catalog is filled with applications in **beta**. 
diff --git a/versioned_docs/version-2.0-2.4/helm-charts/catalog-config/catalog-config.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config.md similarity index 87% rename from versioned_docs/version-2.0-2.4/helm-charts/catalog-config/catalog-config.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config.md index d5f0a99a177..2549634b41a 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/catalog-config/catalog-config.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config.md @@ -38,7 +38,7 @@ In Rancher, you can add the custom Helm chart repository with only a catalog nam # Catalog Fields -When [adding your catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/adding/) to Rancher, you'll provide the following information: +When [adding your catalog](catalog/custom/adding/) to Rancher, you'll provide the following information: | Variable | Description | @@ -49,7 +49,7 @@ When [adding your catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/add | Username (Optional) | Username or OAuth Token | | Password (Optional) | If you are authenticating using a username, enter the associated password. If you are using an OAuth token, use `x-oauth-basic`. | | Branch | For a Git repository, the branch name. Default: `master`. For a Helm Chart repository, this field is ignored. | -| Helm version | The Helm version that will be used to deploy all of the charts in the catalog. This field cannot be changed later. For more information, refer to the [section on Helm versions.]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) | +| Helm version | The Helm version that will be used to deploy all of the charts in the catalog. This field cannot be changed later. 
For more information, refer to the [section on Helm versions.](helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) | # Private Repositories @@ -59,7 +59,7 @@ Private Git or Helm chart repositories can be added into Rancher using either cr ### Using Username and Password -1. When [adding the catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/adding/), select the **Use private catalog** checkbox. +1. When [adding the catalog](catalog/custom/adding/), select the **Use private catalog** checkbox. 2. Provide the `Username` and `Password` for your Git or Helm repository. @@ -70,6 +70,6 @@ Read [using Git over HTTPS and OAuth](https://github.blog/2012-09-21-easier-buil 1. Create an [OAuth token](https://github.com/settings/tokens) with `repo` permission selected, and click **Generate token**. -2. When [adding the catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/adding/), select the **Use private catalog** checkbox. +2. When [adding the catalog](catalog/custom/adding/), select the **Use private catalog** checkbox. 3. For `Username`, provide the Git generated OAuth token. For `Password`, enter `x-oauth-basic`. diff --git a/versioned_docs/version-2.0-2.4/helm-charts/creating-apps/creating-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps.md similarity index 97% rename from versioned_docs/version-2.0-2.4/helm-charts/creating-apps/creating-apps.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps.md index 2b7c2e37b3a..3fc6132aed6 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/creating-apps/creating-apps.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps.md @@ -71,7 +71,7 @@ Before you create your own custom catalog, you should have a basic understanding
Rancher Chart with app-readme.md (left) vs. Helm Chart without (right)
- ![app-readme.md]({{}}/img/rancher/app-readme.png) + ![app-readme.md](/img/app-readme.png) - `questions.yml` @@ -80,7 +80,7 @@ Before you create your own custom catalog, you should have a basic understanding
Rancher Chart with questions.yml (left) vs. Helm Chart without (right)
- ![questions.yml]({{}}/img/rancher/questions.png) + ![questions.yml](/img/questions.png) ### questions.yml @@ -128,4 +128,4 @@ This reference contains variables that you can use in `questions.yml` nested und # Tutorial: Example Custom Chart Creation -For a tutorial on adding a custom Helm chart to a custom catalog, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/catalog/tutorial) +For a tutorial on adding a custom Helm chart to a custom catalog, refer to [this page.](catalog/tutorial) diff --git a/versioned_docs/version-2.0-2.4/helm-charts/globaldns/globaldns.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md similarity index 87% rename from versioned_docs/version-2.0-2.4/helm-charts/globaldns/globaldns.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md index f5de5931b50..787df5a6ada 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/globaldns/globaldns.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md @@ -10,7 +10,7 @@ _Available as of v2.2.0_ Rancher's Global DNS feature provides a way to program an external DNS provider to route traffic to your Kubernetes applications. Since the DNS programming supports spanning applications across different Kubernetes clusters, Global DNS is configured at a global level. An application can become highly available as it allows you to have one application run on different Kubernetes clusters. If one of your Kubernetes clusters goes down, the application would still be accessible. -> **Note:** Global DNS is only available in [Kubernetes installations]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) with the `local` cluster enabled. +> **Note:** Global DNS is only available in [Kubernetes installations](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) with the `local` cluster enabled. 
- [Global DNS Providers](#global-dns-providers) - [Global-DNS-Entries](#global-dns-entries) @@ -39,11 +39,11 @@ The following table lists the first version of Rancher each provider debuted. # Global DNS Entries -For each application that you want to route traffic to, you will need to create a Global DNS Entry. This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. +For each application that you want to route traffic to, you will need to create a Global DNS Entry. This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application](catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. # Permissions for Global DNS Providers and Entries -By default, only [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. 
+By default, only [global administrators](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. # Setting up Global DNS for Applications @@ -61,7 +61,7 @@ By default, only [global administrators]({{}}/rancher/v2.0-v2.4/en/admi # Editing a Global DNS Provider -The [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: +The [global administrators](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: - Root Domain - Access Key & Secret Key @@ -74,7 +74,7 @@ The [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rb # Editing a Global DNS Entry -The [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. Any members can edit the following fields: +The [global administrators](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. 
Any members can edit the following fields: - FQDN - Global DNS Provider @@ -97,7 +97,7 @@ Permission checks are relaxed for removing target projects in order to support s |----------|--------------------| | FQDN | Enter the **FQDN** you wish to program on the external DNS. | | Provider | Select a Global DNS **Provider** from the list. | -| Resolves To | Select if this DNS entry will be for a [multi-cluster application]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or for workloads in different [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/). | +| Resolves To | Select if this DNS entry will be for a [multi-cluster application](catalog/multi-cluster-apps/) or for workloads in different [projects](k8s-in-rancher/projects-and-namespaces/). | | Multi-Cluster App Target | The target for the global DNS entry. You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. | | DNS TTL | Configure the DNS time to live value in seconds. By default, it will be 300 seconds. | | Member Access | Search for any users that you want to have the ability to manage this Global DNS entry. | @@ -158,4 +158,4 @@ In order for the DNS to be programmed, the following requirements must be met: * The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. * The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. -Once the ingress in your [multi-cluster application]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or in your target projects is in an `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. 
\ No newline at end of file +Once the ingress in your [multi-cluster application](catalog/multi-cluster-apps/) or in your target projects is in an `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/helm-charts/launching-apps/launching-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps.md similarity index 83% rename from versioned_docs/version-2.0-2.4/helm-charts/launching-apps/launching-apps.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps.md index f93b41ddbe4..aa63c7ab22e 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/launching-apps/launching-apps.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps.md @@ -10,9 +10,9 @@ aliases: import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-scopes). +Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs](helm-charts/legacy-catalogs/#catalog-scopes). -If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). +If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry](globaldns.md). 
- [Prerequisites](#prerequisites) - [Launching a catalog app](#launching-a-catalog-app) @@ -24,10 +24,10 @@ When Rancher deploys a catalog app, it launches an ephemeral instance of a Helm To launch an app from a catalog in Rancher, you must have at least one of the following permissions: -- A [project-member role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster, which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the cluster that include the target project +- A [project-member role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) in the target cluster, which gives you the ability to create, read, update, and delete the workloads +- A [cluster owner role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) for the cluster that includes the target project -Before launching an app, you'll need to either [enable a built-in global catalog]({{}}/rancher/v2.0-v2.4/en/catalog/built-in) or [add your own custom catalog.]({{}}/rancher/v2.0-v2.4/en/catalog/adding-catalogs) +Before launching an app, you'll need to either [enable a built-in global catalog](catalog/built-in) or [add your own custom catalog.](catalog/adding-catalogs) # Launching a Catalog App @@ -69,7 +69,7 @@ If the Helm chart that you are deploying contains a `questions.yml` file, Ranche ### Key Value Pairs for Native Helm Charts -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/catalog-config/#custom-helm-chart-repository)), answers are provided as key value pairs in
the **Answers** section. These answers are used to override the default values. +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository](helm-charts/legacy-catalogs/catalog-config/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values.
diff --git a/versioned_docs/version-2.0-2.4/helm-charts/managing-apps/managing-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/managing-apps.md similarity index 100% rename from versioned_docs/version-2.0-2.4/helm-charts/managing-apps/managing-apps.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/managing-apps.md diff --git a/versioned_docs/version-2.0-2.4/helm-charts/multi-cluster-apps/multi-cluster-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps.md similarity index 59% rename from versioned_docs/version-2.0-2.4/helm-charts/multi-cluster-apps/multi-cluster-apps.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps.md index 4bcd3b4b420..500d4586fe0 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/multi-cluster-apps/multi-cluster-apps.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps.md @@ -7,4 +7,4 @@ aliases: --- _Available as of v2.2.0_ -The documentation about multi-cluster apps has moved [here.]({{}}/rancher/v2.0-v2.4/en/deploy-across-clusters/multi-cluster-apps) +The documentation about multi-cluster apps has moved [here.](deploy-across-clusters/multi-cluster-apps) diff --git a/versioned_docs/version-2.0-2.4/helm-charts/tutorial/tutorial.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial.md similarity index 91% rename from versioned_docs/version-2.0-2.4/helm-charts/tutorial/tutorial.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial.md index 09b5c493fc1..e653bbac65a 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/tutorial/tutorial.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial.md @@ -12,7 +12,7 @@ You can fill 
your custom catalogs with either Helm Charts or Rancher Charts, alt > For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://helm.sh/docs/chart_template_guide/). -1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in the [Chart Directory Structure]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps/#chart-directory-structure). +1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in the [Chart Directory Structure](helm-charts/legacy-catalogs/creating-apps/#chart-directory-structure). Rancher requires this directory structure, although `app-readme.md` and `questions.yml` are optional. @@ -42,7 +42,7 @@ You can fill your custom catalogs with either Helm Charts or Rancher Charts, alt The example below creates a form that prompts users for persistent volume size and a storage class.

- For a list of variables you can use when creating a `questions.yml` file, see [Question Variable Reference]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps/#question-variable-reference). + For a list of variables you can use when creating a `questions.yml` file, see [Question Variable Reference](helm-charts/legacy-catalogs/creating-apps/#question-variable-reference). ```yaml categories: diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md similarity index 96% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md index f34ca57b1ae..0586afbd701 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md @@ -97,19 +97,19 @@ Next, add your Linux nodes to both target groups. Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} +![](/img/ha/nlb/edit-targetgroup-443.png) Select the instances (Linux nodes) you want to add, and click **Add to registered**. *** **Screenshot Add targets to target group TCP port 443**
-{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} +![](/img/ha/nlb/add-targets-targetgroup-443.png) *** **Screenshot Added targets to target group TCP port 443**
-{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} +![](/img/ha/nlb/added-targets-targetgroup-443.png) When the instances are added, click **Save** on the bottom right of the screen. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/infra-for-ha-with-external-db.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md similarity index 86% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/infra-for-ha-with-external-db.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md index 89bcefa5587..fea1cd41964 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/infra-for-ha-with-external-db.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md @@ -7,7 +7,7 @@ This tutorial is intended to help you provision the underlying infrastructure fo The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. -For more information about each installation option, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation) +For more information about each installation option, refer to [this page.](../../../pages-for-subheaders/installation-and-upgrade.md) > **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). @@ -20,9 +20,9 @@ To install the Rancher management server on a high-availability K3s cluster, we ### 1. 
Set up Linux Nodes -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. ### 2. Set up External Datastore @@ -32,9 +32,9 @@ For a high-availability K3s installation, you will need to set a [MySQL](https:/ When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. -For an example of one way to set up the MySQL database, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/rds/) for setting up MySQL on Amazon's RDS service. +For an example of one way to set up the MySQL database, refer to this [tutorial](installation/options/rds/) for setting up MySQL on Amazon's RDS service. -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) ### 3. 
Set up the Load Balancer @@ -47,11 +47,11 @@ When Rancher is installed (also in a later step), the Rancher system creates an For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#external-tls-termination) +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. 
Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](installation/options/chart-options/#external-tls-termination) -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/infra-for-ha.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md similarity index 91% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/infra-for-ha.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md index 0ca01f9a5d8..b91dab8a813 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/infra-for-ha.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md @@ -24,9 +24,9 @@ The etcd database requires an odd number of nodes so that it can always elect a ### 1. Set up Linux Nodes -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node/) for setting up nodes as instances in Amazon EC2. +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node/) for setting up nodes as instances in Amazon EC2. ### 2. Set up the Load Balancer @@ -39,11 +39,11 @@ When Rancher is installed (also in a later step), the Rancher system creates an For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. 
We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#external-tls-termination) +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. 
For more information, refer to the [Rancher Helm chart options.](installation/options/chart-options/#external-tls-termination) -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) > **Important:** > Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/rds.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md similarity index 95% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/rds.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md index cb88c11a857..c56672ef429 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/rds.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md @@ -33,4 +33,4 @@ This information will be used to connect to the database in the following format mysql://username:password@tcp(hostname:3306)/database-name ``` -For more information on configuring the datastore for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) +For more information on configuring the datastore for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/nginx.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/nginx.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ec2-node.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md similarity index 90% rename from 
versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ec2-node.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md index 0b9927cb880..e3858905588 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ec2-node.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.0-v2.4/en/installation/options/ec2-node --- -In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. @@ -15,8 +15,8 @@ If the Rancher server is installed in a single Docker container, you only need o ### 1. Optional Preparation -- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. 
For the full details on setting up the cloud provider, refer to this [page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/#port-requirements) +- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.](cluster-provisioning/rke-clusters/options/cloud-providers/) +- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.](../../../pages-for-subheaders/installation-requirements.md#port-requirements) ### 2. Provision Instances @@ -29,7 +29,7 @@ If the Rancher server is installed in a single Docker container, you only need o 1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. 1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. 1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** -1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/#port-requirements) for Rancher nodes. +1. 
In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements](../../../pages-for-subheaders/installation-requirements.md#port-requirements) for Rancher nodes. 1. Click **Review and Launch.** 1. Click **Launch.** 1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/how-ha-works/how-ha-works.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md similarity index 89% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/how-ha-works/how-ha-works.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md index e572c961b0e..f275fe2ab5d 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/how-ha-works/how-ha-works.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md @@ -11,7 +11,7 @@ Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. 
-For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture) +For information on how Rancher works, regardless of the installation method, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md) ### Recommended Architecture @@ -21,5 +21,5 @@ For information on how Rancher works, regardless of the installation method, ref - The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment.
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![High-availability Kubernetes Installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) +![High-availability Kubernetes Installation of Rancher](/img/ha/rancher2ha.svg) Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/ha-with-external-db/ha-with-external-db.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md similarity index 91% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/ha-with-external-db/ha-with-external-db.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md index 7b9a8c3eb30..ef6e6daa845 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/ha-with-external-db/ha-with-external-db.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md @@ -4,7 +4,7 @@ shortTitle: Set up K3s for Rancher weight: 2 --- -This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) +This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) For systems without direct internet access, refer to the air gap installation instructions. 
@@ -17,7 +17,7 @@ For systems without direct internet access, refer to the air gap installation in # Prerequisites -These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) +These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.](../infrastructure-setup/ha-k3s-kubernetes-cluster.md) Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. # Installing Kubernetes @@ -101,7 +101,7 @@ users: kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces ``` -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. ### 4. 
Check the Health of Your Cluster Pods diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/ha-RKE/ha-RKE.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md similarity index 89% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/ha-RKE/ha-RKE.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md index 9a8b8e758c6..7a0c53fe5eb 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/ha-RKE/ha-RKE.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md @@ -15,7 +15,7 @@ As of Rancher v2.4, the Rancher management server can be installed on either an The Rancher management server can only be run on Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. -For systems without direct internet access, refer to [Air Gap: Kubernetes install.]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) +For systems without direct internet access, refer to [Air Gap: Kubernetes install.](installation/air-gap-high-availability/) > **Single-node Installation Tip:** > In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. @@ -30,7 +30,7 @@ For systems without direct internet access, refer to [Air Gap: Kubernetes instal Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. 
-Also install [RKE,]({{}}/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. +Also install [RKE,](https://rancher.com/docs/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. ### 1. Create the cluster configuration file @@ -83,9 +83,9 @@ ingress: > **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. > -> Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. +> Please see the [RKE Documentation](https://rancher.com/docs/rke/latest/en/config-options/) for the full list of options and capabilities. > -> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide]({{}}/rancher/v2.0-v2.4/en/installation/options/etcd/). +> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide](installation/options/etcd/). ### 2. Run RKE @@ -159,15 +159,15 @@ This confirms that you have successfully installed a Kubernetes cluster that the Save a copy of the following files in a secure location: - `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster; this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. ### Issues or errors? -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) page. +See the [Troubleshooting](installation/options/troubleshooting/) page. -### [Next: Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/k8s-install/helm-rancher/) +### [Next: Install Rancher](installation/k8s-install/helm-rancher/) diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/production/recommended-architecture/recommended-architecture.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md similarity index 83% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/production/recommended-architecture/recommended-architecture.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md index 7e5fc92f897..bbb9851ed5b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/production/recommended-architecture/recommended-architecture.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md @@ -9,7 +9,7 @@ There are three roles that can be assigned to nodes: `etcd`, `controlplane` and When designing your cluster(s), you have two options: -* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. 
It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements). +* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements](../node-requirements-for-rancher-managed-clusters.md#networking-requirements). * Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. @@ -29,7 +29,7 @@ The cluster should have: - At least two nodes with the role `controlplane` for master component high availability. - At least two nodes with the role `worker` for workload rescheduling upon node failure. -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles) +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](roles-for-nodes-in-kubernetes.md) ### Number of Controlplane Nodes @@ -63,7 +63,7 @@ Adding more than one node with the `worker` role will make sure your workloads c ### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications -You may have noticed that our [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. 
However, for your Rancher installation, this three node cluster is valid, because: +You may have noticed that our [Kubernetes Install](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three-node cluster is valid, because: * It allows one `etcd` node failure. * It maintains multiple instances of the master components by having multiple `controlplane` nodes. diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/production/nodes-and-roles/nodes-and-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md similarity index 94% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/production/nodes-and-roles/nodes-and-roles.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md index 4013efb7e0d..df3b3c3cef5 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/production/nodes-and-roles/nodes-and-roles.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md @@ -5,9 +5,9 @@ weight: 1 This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. -This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). +This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)
-![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
+![Cluster diagram](/img/clusterdiagram.svg)
Lines show the traffic flow between components. Colors are used purely for visual aid # etcd diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/imported-clusters/imported-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/imported-clusters/imported-clusters.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md index afc04b75e25..d2bc2a4b058 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/imported-clusters/imported-clusters.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md @@ -31,14 +31,14 @@ Rancher v2.4 added the capability to import a K3s cluster into Rancher, as well After importing a cluster, the cluster owner can: -- [Manage cluster access]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and [logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) -- Enable [Istio]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/) -- Use [pipelines]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines/) -- Configure [alerts]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) and [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) -- Manage [projects]({{}}/rancher/v2.0-v2.4/en/project-admin/) and [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/) +- [Manage cluster access](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) through role-based access control +- Enable 
[monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) and [logging](cluster-admin/tools/logging/) +- Enable [Istio](../../../pages-for-subheaders/istio.md) +- Use [pipelines](../../advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- Configure [alerts](cluster-admin/tools/alerts/) and [notifiers](../../../explanations/integrations-in-rancher/notifiers.md) +- Manage [projects](../../../pages-for-subheaders/manage-projects.md) and [workloads](../../../pages-for-subheaders/workloads-and-pods.md) -After importing a K3s cluster, the cluster owner can also [upgrade Kubernetes from the Rancher UI.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/) +After importing a K3s cluster, the cluster owner can also [upgrade Kubernetes from the Rancher UI.](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) # Prerequisites @@ -81,7 +81,7 @@ By default, GKE users are not given this privilege, so you will need to run the # Imported K3s Clusters -You can now import a K3s Kubernetes cluster into Rancher. [K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. You can also upgrade Kubernetes by editing the K3s cluster in the Rancher UI. +You can now import a K3s Kubernetes cluster into Rancher. [K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. You can also upgrade Kubernetes by editing the K3s cluster in the Rancher UI. 
### Additional Features for Imported K3s Clusters diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/rancher-agents/rancher-agents.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md similarity index 72% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/rancher-agents/rancher-agents.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md index 5af2f49ba91..6baaaaebb7f 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/rancher-agents/rancher-agents.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md @@ -8,15 +8,15 @@ There are two different agent resources deployed on Rancher managed clusters: - [cattle-cluster-agent](#cattle-cluster-agent) - [cattle-node-agent](#cattle-node-agent) -For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture]({{}}/rancher/v2.0-v2.4/en/overview/architecture/) +For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture](../../../../pages-for-subheaders/rancher-manager-architecture.md) ### cattle-cluster-agent -The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. +The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. 
### cattle-node-agent -The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters when `cattle-cluster-agent` is unavailable. > **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/amazon/amazon.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/amazon/amazon.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md index bd449cadc54..3666137155c 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/amazon/amazon.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md @@ -22,9 +22,9 @@ All nodes added to the cluster must be able to interact with EC2 so that they ca * The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example, please remove any unneeded permissions for your use case. * The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. -While creating an [Amazon EC2 cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. +While creating an [Amazon EC2 cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. 
-While creating a [Custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes), you must manually attach the IAM role to the instance(s). +While creating a [Custom cluster](../../../../../../pages-for-subheaders/use-existing-nodes.md), you must manually attach the IAM role to the instance(s). IAM Policy for nodes with the `controlplane` role: @@ -133,7 +133,7 @@ The following resources need to tagged with a `ClusterID`: >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). -When you create an [Amazon EC2 Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. +When you create an [Amazon EC2 Cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. 
Use the following tag: diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/azure/azure.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/azure/azure.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/gce/gce.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/gce/gce.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/vsphere/vsphere.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md similarity index 76% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/vsphere/vsphere.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md index 2ecc8a4e6a4..2a20c6930b2 100644 --- 
a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/vsphere/vsphere.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md @@ -9,7 +9,7 @@ Follow these steps while creating the vSphere cluster in Rancher: 1. Set **Cloud Provider** option to `Custom`. - {{< img "/img/rancher/vsphere-node-driver-cloudprovider.png" "vsphere-node-driver-cloudprovider">}} + ![](/img/vsphere-node-driver-cloudprovider.png) 1. Click on **Edit as YAML** 1. Insert the following structure to the pre-populated cluster YAML. As of Rancher v2.3+, this structure must be placed under `rancher_kubernetes_engine_config`. In versions before v2.3, it has to be defined as a top-level field. Note that the `name` *must* be set to `vsphere`. @@ -22,4 +22,4 @@ Follow these steps while creating the vSphere cluster in Rancher: [Insert provider configuration] ``` -Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. \ No newline at end of file +Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md similarity index 73% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md index 4556f02fc3f..bb9d59450cb 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md @@ -35,11 +35,11 @@ Then you will create a DigitalOcean cluster in Rancher, and when configuring the ### 2. Create a node template with your cloud credentials -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. 1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** 1. Click **Add Template.** -1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](./do-node-template-config) +1. 
Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md) ### 3. Create a cluster with node pools using the node template @@ -47,8 +47,8 @@ Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioni 1. Choose **DigitalOcean**. 1. Enter a **Cluster Name**. 1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. 
Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) 1. Review your options to confirm they're correct. Then click **Create**. **Result:** @@ -69,8 +69,8 @@ You can access your cluster after its state is updated to **Active.** 1. Choose **DigitalOcean**. 1. Enter a **Cluster Name**. 1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Digital Ocean Options** form. For help filling out the form, refer to the [Digital Ocean node template configuration reference.](./do-node-template-config) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. 
To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Digital Ocean Options** form. For help filling out the form, refer to the [Digital Ocean node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) 1. Review your options to confirm they're correct. Then click **Create**. **Result:** @@ -91,5 +91,5 @@ You can access your cluster after its state is updated to **Active.** After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. 
-- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md similarity index 81% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md index 9eb755b5544..703a8eef18c 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md @@ -19,7 +19,7 @@ Then you will create an EC2 cluster in Rancher, and when configuring the new clu - **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. - **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. 
See our three example JSON policies below: - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) + - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](../../../../../pages-for-subheaders/set-up-cloud-providers.md) or want to pass an IAM Profile to an instance) - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) - **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. @@ -50,22 +50,22 @@ The steps to create a cluster differ based on your Rancher version. ### 2. Create a node template with your cloud credentials and information from EC2 -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. 1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** 1. Click **Add Template.** -1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) +1. Fill out a node template for EC2.
For help filling out the form, refer to [EC2 Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md) ### 3. Create a cluster with node pools using the node template -Add one or more node pools to your cluster. For more information about node pools, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +Add one or more node pools to your cluster. For more information about node pools, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) 1. From the **Clusters** page, click **Add Cluster**. 1. Choose **Amazon EC2**. 1. Enter a **Cluster Name**. -1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) 1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) +1. 
Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers](../../../../../pages-for-subheaders/set-up-cloud-providers.md) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) 1. Click **Create**. **Result:** @@ -86,8 +86,8 @@ You can access your cluster after its state is updated to **Active.** 1. Choose **Amazon EC2**. 1. Enter a **Cluster Name**. 1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** Refer to [Selecting Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) To create a node template, click **Add Node Template**. For help filling out the node template, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) +1.
Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** Refer to [Selecting Cloud Providers](../../../../../pages-for-subheaders/set-up-cloud-providers.md) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) To create a node template, click **Add Node Template**. For help filling out the node template, refer to [EC2 Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md) 1. Click **Create**. 1. **Optional:** Add additional node pools. 1. Review your cluster settings to confirm they are correct. Then click **Create**. @@ -109,8 +109,8 @@ You can access your cluster after its state is updated to **Active.** After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster.
This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
# IAM Policies diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/azure/azure.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md similarity index 73% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/azure/azure.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md index 20728b35d00..a9dc25f0887 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/azure/azure.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md @@ -9,7 +9,7 @@ aliases: import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher. +In this section, you'll learn how to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Azure through Rancher. First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. @@ -22,9 +22,9 @@ Then you will create an Azure cluster in Rancher, and when configuring the new c > For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). 
-For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) +For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) -For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](./azure-node-template-config) +For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) - [Preparation in Azure](#preparation-in-azure) - [Creating an Azure Cluster](#creating-an-azure-cluster) @@ -68,11 +68,11 @@ The creation of this service principal returns three pieces of identification in ### 2. Create a node template with your cloud credentials -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. 1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** 1. Click **Add Template.** -1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](./azure-node-template-config) +1. Fill out a node template for Azure. 
For help filling out the form, refer to [Azure Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) ### 3. Create a cluster with node pools using the node template @@ -82,8 +82,8 @@ Use Rancher to create a Kubernetes cluster in Azure. 1. Choose **Azure**. 1. Enter a **Cluster Name**. 1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) 1. 
Review your options to confirm they're correct. Then click **Create**. **Result:** @@ -106,8 +106,8 @@ Use Rancher to create a Kubernetes cluster in Azure. 1. Choose **Azure**. 1. Enter a **Cluster Name**. 1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Azure Options** form. For help filling out the form, refer to the [Azure node template configuration reference.](./azure-node-template-config) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Azure Options** form. 
For help filling out the form, refer to the [Azure node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) 1. Review your options to confirm they're correct. Then click **Create**. **Result:** @@ -128,5 +128,5 @@ You can access your cluster after its state is updated to **Active.** After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. 
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/creating-credentials.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md similarity index 88% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/creating-credentials.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md index 1a3a57d155b..074e10bab27 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/creating-credentials.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md @@ -24,20 +24,20 @@ The following steps create a role with the required privileges and then assign i 3. Create a new role. 
Give it a name and select the privileges listed in the permissions table above. - {{< img "/img/rancher/rancherroles1.png" "image" >}} + ![](/img/rancherroles1.png) 4. Go to the **Users and Groups** tab. 5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. - {{< img "/img/rancher/rancheruser.png" "image" >}} + ![](/img/rancheruser.png) 6. Go to the **Global Permissions** tab. 7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. - {{< img "/img/rancher/globalpermissionuser.png" "image" >}} + ![](/img/globalpermissionuser.png) - {{< img "/img/rancher/globalpermissionrole.png" "image" >}} + ![](/img/globalpermissionrole.png) **Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/provisioning-vsphere-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md similarity index 64% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/provisioning-vsphere-clusters.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md index 17031e0e33b..81f72eed67c 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/provisioning-vsphere-clusters.md +++ 
b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md @@ -7,15 +7,15 @@ weight: 1 import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -In this section, you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere. First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. 
-For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) +For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.](../../../../../../pages-for-subheaders/creating-a-vsphere-cluster.md) -For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) +For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) - [Preparation in vSphere](#preparation-in-vsphere) - [Creating a vSphere Cluster](#creating-a-vsphere-cluster) @@ -30,7 +30,7 @@ The node templates are documented and tested with the vSphere Web Services API v Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. -Refer to this [how-to guide]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. +Refer to this [how-to guide](create-credentials.md) for instructions on how to create a user in vSphere with the required permissions.
These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. ### Network Permissions @@ -40,7 +40,7 @@ It must be ensured that the hosts running the Rancher server are able to establi - To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required with Rancher before v2.3.3 or when using the ISO creation method in later versions*). - To port 22/TCP and 2376/TCP on the created VMs -See [Node Networking Requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. +See [Node Networking Requirements](../../../node-requirements-for-rancher-managed-clusters.md#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. ### Valid ESXi License for vSphere API Access @@ -67,21 +67,21 @@ The a vSphere cluster is created in Rancher depends on the Rancher version. 1. Click **Add Cloud Credential.** 1. Enter a name for the cloud credential. 1. In the **Cloud Credential Type** field, select **vSphere**. -1. Enter your vSphere credentials. For help, refer to **Account Access** in the [configuration reference for your Rancher version.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/) +1. Enter your vSphere credentials. For help, refer to **Account Access** in the [configuration reference for your Rancher version.](../../../../../../pages-for-subheaders/creating-a-vsphere-cluster.md) 1. Click **Create.** **Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. ### 2. 
Create a node template with your cloud credentials -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. +Creating a [node template](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. 1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** 1. Click **Add Template.** 1. Fill out a node template for vSphere. For help filling out the form, refer to the vSphere node template configuration reference. Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: - - [v2.3.3]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3) - - [v2.3.0]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0) - - [v2.2.0]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0) + - [v2.3.3](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md) + - [v2.3.0](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md) + - [v2.2.0](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md) ### 3. Create a cluster with node pools using the node template @@ -91,9 +91,9 @@ Use Rancher to create a Kubernetes cluster in vSphere. 1. Click **Add Cluster** and select the **vSphere** infrastructure provider. 1. Enter a **Cluster Name.** 1. 
Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.](../../set-up-cloud-providers/other-cloud-providers/vsphere.md) +1. Add one or more node pools to your cluster. 
Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) 1. Review your options to confirm they're correct. Then click **Create**. **Result:** @@ -118,11 +118,11 @@ For Rancher versions before v2.0.4, when you create the cluster, you will also n 1. Choose **vSphere**. 1. Enter a **Cluster Name**. 1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more [node pools]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **vSphere Options** form. For help filling out the form, refer to the vSphere node template configuration reference. 
Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: - - [v2.0.4]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4) - - [before v2.0.4]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4) +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.](../../set-up-cloud-providers/other-cloud-providers/vsphere.md) +1. Add one or more [node pools](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **vSphere Options** form. For help filling out the form, refer to the vSphere node template configuration reference. Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: + - [v2.0.4](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md) + - [before v2.0.4](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md) 1. Review your options to confirm they're correct. 
Then click **Create** to start provisioning the VMs and Kubernetes services. **Result:** @@ -146,6 +146,6 @@ You can access your cluster after its state is updated to **Active.** After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. -- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) \ No newline at end of file +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. 
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. +- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../../../../../advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../set-up-cloud-providers/other-cloud-providers/vsphere.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/azure-storageclass.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/azure-storageclass.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md index 0677c360b50..770ee0d9e77 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/azure-storageclass.md +++ 
b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md @@ -7,7 +7,7 @@ If you are using Azure VMs for your nodes, you can use [Azure files](https://doc In order to have the Azure platform create the required storage resources, follow these steps: -1. [Configure the Azure cloud provider.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/azure) +1. [Configure the Azure cloud provider.](../set-up-cloud-providers/other-cloud-providers/azure.md) 1. Configure `kubectl` to connect to your cluster. 1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/host-gateway-requirements.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/host-gateway-requirements.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/docs-for-2.1-and-2.2.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md similarity index 85% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/docs-for-2.1-and-2.2.md rename to 
versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md index c1f5ce3bae5..56f1e0f654b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/docs-for-2.1-and-2.2.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md @@ -7,9 +7,9 @@ aliases: _Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ -This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/). +This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later](../../../../../pages-for-subheaders/use-windows-clusters.md). -When you create a [custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. +When you create a [custom cluster](../../../../../pages-for-subheaders/use-existing-nodes.md), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. @@ -45,7 +45,7 @@ When setting up a custom cluster with support for Windows nodes and containers, ## 1. Provision Hosts -To begin provisioning a custom cluster with Windows support, prepare your host servers. 
Provision three nodes according to our [requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/)—two Linux, one Windows. Your hosts can be: +To begin provisioning a custom cluster with Windows support, prepare your host servers. Provision three nodes according to our [requirements](../../../../../pages-for-subheaders/installation-requirements.md)—two Linux, one Windows. Your hosts can be: - Cloud-hosted VMs - VMs from virtualization clusters @@ -61,7 +61,7 @@ Node 3 | Windows (Windows Server core version 1809 or above) | Worker ### Requirements -- You can view node requirements for Linux and Windows nodes in the [installation section]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). +- You can view node requirements for Linux and Windows nodes in the [installation section](../../../../../pages-for-subheaders/installation-requirements.md). - All nodes in a virtualization cluster or a bare metal cluster must be connected using a layer 2 network. - To support [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), your cluster must include at least one Linux node dedicated to the worker role. - Although we recommend the three node architecture listed in the table above, you can add additional Linux and Windows workers to scale up your cluster for redundancy. @@ -81,20 +81,20 @@ Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/az ## 3. Create the Custom Cluster -To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/), starting from 2. Create the Custom Cluster. While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. 
+To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes](../../../../../pages-for-subheaders/use-existing-nodes.md), starting from 2. Create the Custom Cluster. While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. ### Enable the Windows Support Option While choosing **Cluster Options**, set **Windows Support (Experimental)** to **Enabled**. -After you select this option, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) from [step 6]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/#step-6). +After you select this option, resume [Creating a Cluster with Custom Nodes](../../../../../pages-for-subheaders/use-existing-nodes.md) from [step 6](../../../../../pages-for-subheaders/use-existing-nodes.md#step-6). ### Networking Option When choosing a network provider for a cluster that supports Windows, the only option available is Flannel, as [host-gw](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) is needed for IP routing. -If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. +If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers](../../../../../pages-for-subheaders/set-up-cloud-providers.md) for configuration info. ### Node Configuration @@ -105,7 +105,7 @@ Option | Setting Node Operating System | Linux Node Roles | etcd&lt;br/&gt;
Control Plane
Worker -When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) from [step 8]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). +When you're done with these configurations, resume [Creating a Cluster with Custom Nodes](../../../../../pages-for-subheaders/use-existing-nodes.md) from [step 8](../../../../../pages-for-subheaders/use-existing-nodes.md#step-8). diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/node-requirements/node-requirements.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md similarity index 79% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/node-requirements/node-requirements.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md index 1a8f09fe173..d1afa1431b4 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/node-requirements/node-requirements.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md @@ -8,7 +8,7 @@ import TabItem from '@theme/TabItem'; This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. -> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. 
For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.](../../../pages-for-subheaders/installation-requirements.md) Make sure the nodes for the Rancher server fulfill the following requirements: @@ -25,7 +25,7 @@ For details on which OS and Docker versions were tested with each Rancher versio n, All supported operating systems are 64-bit x86. -If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.0-v2.4/en/installation/options/arm64-platform/) +If you plan to use ARM64, see [Running on ARM64 (Experimental).](../../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rancher-on-arm64.md) For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) @@ -35,11 +35,11 @@ Some distributions of Linux derived from RHEL, including Oracle Linux, may have ### SUSE Linux Nodes -SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. +SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. 
### Flatcar Container Linux Nodes -When [Launching Kubernetes with Rancher]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) +When [Launching Kubernetes with Rancher](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) @@ -84,7 +84,7 @@ It is also required to enable the Docker service, you can enable the Docker serv systemctl enable docker.service ``` -The Docker service is enabled automatically when using [Node Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/#node-drivers). +The Docker service is enabled automatically when using [Node Drivers](../../../pages-for-subheaders/about-provisioning-drivers.md#node-drivers). ### Windows Nodes @@ -92,7 +92,7 @@ _Windows worker nodes can be used as of Rancher v2.3.0_ Nodes with Windows Server must run Docker Enterprise Edition. -Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/) +Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows](../../../pages-for-subheaders/use-windows-clusters.md) # Hardware Requirements @@ -108,14 +108,14 @@ For hardware recommendations for etcd clusters in production, refer to the offic For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. -The ports required to be open are different depending on how the user cluster is launched. 
Each of the sections below list the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below lists the ports that need to be opened for different [cluster creation options](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). -For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.](https://rancher.com/docs/rke/latest/en/os/#ports) -Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports#downstream-kubernetes-cluster-nodes). +Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes). # Optional: Security Considerations If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. 
-For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.0-v2.4/en/security/#rancher-hardening-guide) +For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.](../../../pages-for-subheaders/rancher-security.md#rancher-hardening-guide) diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/aks/aks.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/aks/aks.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/ack/ack.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md similarity index 82% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/ack/ack.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md index c6daf4d0cd6..5e862ee9d07 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/ack/ack.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md @@ -6,7 +6,7 @@ weight: 2120 _Available as of v2.2.0_ -You can use Rancher to create a cluster hosted in Alibaba Cloud 
Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. +You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. 
## Prerequisites diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/gke/gke.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/gke/gke.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/cce/cce.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md similarity index 89% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/cce/cce.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md index ce9f35e2362..d0e156615b3 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/cce/cce.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md @@ -6,7 +6,7 @@ weight: 2130 _Available as of v2.2.0_ -You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). 
After enabling the cluster driver, you can start provisioning CCE clusters. +You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. ## Prerequisites in Huawei @@ -59,7 +59,7 @@ You can access your cluster after its state is updated to **Active.** | Cluster Label | The labels for the cluster. | | Highway Subnet | This option is only supported in `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | -**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) +**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. 
For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher-v2-3-0) # Node Configuration diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/tke/tke.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md similarity index 88% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/tke/tke.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md index dca60a832a9..ff8574f687b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/tke/tke.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md @@ -6,7 +6,7 @@ weight: 2125 _Available as of v2.2.0_ -You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. +You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). 
Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. ## Prerequisites in Tencent @@ -48,7 +48,7 @@ You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster, you may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Default to 172.16.0.0/16. | - **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher-v2-3-0) 7. 
Click `Next: Select Instance Type` to choose the instance type that will use for your TKE cluster. diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/configmaps/configmaps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md similarity index 90% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/configmaps/configmaps.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md index 124ae82895a..8033ef02b1b 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/configmaps/configmaps.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md @@ -26,11 +26,11 @@ ConfigMaps accept key value pairs in common string formats, like config files or 1. Click **Save**. - >**Note:** Don't use ConfigMaps to store sensitive data [use a secret]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/). + >**Note:** Don't use ConfigMaps to store sensitive data [use a secret](secrets.md). > >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} + > ![](/img/bulk-key-values.gif) **Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. @@ -41,4 +41,4 @@ Now that you have a ConfigMap added to a namespace, you can add it to a workload - Application environment variables. - Specifying parameters for a Volume mounted to the workload. -For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). +For more information on adding ConfigMaps to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). 
diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/service-discovery/service-discovery.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md similarity index 100% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/service-discovery/service-discovery.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/certificates/certificates.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md similarity index 97% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/certificates/certificates.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md index 9c5ed85d684..7a1118d7630 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/certificates/certificates.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md @@ -43,4 +43,4 @@ Add SSL certificates to either projects, namespaces, or both. A project scoped c ## What's Next? -Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). +Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress](load-balancer-and-ingress-controller/add-ingresses.md). 
diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/hpa-background.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md similarity index 97% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/hpa-background.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md index 369f7a1a8d4..502bfd33ebf 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/hpa-background.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md @@ -22,7 +22,7 @@ HPA improves your services by: ## How HPA Works -![HPA Schema]({{}}/img/rancher/horizontal-pod-autoscaler.jpg) +![HPA Schema](/img/horizontal-pod-autoscaler.jpg) HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/hpa-for-rancher-before-2_0_7.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md similarity index 100% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/hpa-for-rancher-before-2_0_7.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md 
similarity index 98% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md index 9a67085107c..f3c912ffeb8 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md @@ -15,11 +15,11 @@ This section describes HPA management with `kubectl`. This document has instruct ### Note For Rancher v2.3.x -In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. +In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI](manage-hpas-with-ui.md). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. ### Note For Rancher Before v2.0.7 -Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). +Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. 
To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7](hpa-for-rancher-before-2.0.7.md). ##### Basic kubectl Command for Managing HPAs diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md similarity index 84% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md index 49d3a4866e9..4f0b1e2ee1f 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md @@ -9,7 +9,7 @@ _Available as of v2.3.0_ The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. -If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). ## Creating an HPA @@ -27,7 +27,7 @@ If you want to create HPAs that scale based on other metrics than CPU and memory 1. 
Specify the **Minimum Scale** and **Maximum Scale** for the HPA. -1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl](manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). 1. Click **Create** to create the HPA. 
diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md similarity index 99% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md index e80771520d1..65240fa671d 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md @@ -6,7 +6,7 @@ aliases: - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa --- -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). +This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI](manage-hpas-with-kubectl.md). For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. 
diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/registries/registries.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md similarity index 100% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/registries/registries.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md similarity index 96% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md index 82a6da5d5d1..49a163e1306 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md @@ -7,7 +7,7 @@ aliases: - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress --- -Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). +Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry](../../helm-charts-in-rancher/globaldns.md). 1. 
From the **Global** view, open the project that you want to add ingress to. 1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions before v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. @@ -58,7 +58,7 @@ Use this option to set an ingress rule for handling requests that don't match an 1. Select a service or workload from the **Target** drop-down list. ### Certificates ->**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/). +>**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates](../encrypt-http-communication.md). 1. Click **Add Certificate**. 1. Select a **Certificate** from the drop-down list. diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/load-balancers/load-balancers.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md similarity index 100% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/load-balancers/load-balancers.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/secrets/secrets.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md similarity index 89% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/secrets/secrets.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md index a852a837e63..4ea67ee24b3 100644 --- 
a/versioned_docs/version-2.0-2.4/k8s-in-rancher/secrets/secrets.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md @@ -8,7 +8,7 @@ aliases: [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. -> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries) +> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.](kubernetes-and-docker-registries.md) When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. @@ -32,7 +32,7 @@ When creating a secret, you can make it available for any deployment within a pr >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} + > ![](/img/bulk-key-values.gif) 1. Click **Save**. @@ -44,4 +44,4 @@ Mounted secrets will be updated automatically unless they are mounted as subpath Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. -For more information on adding secret to a workload, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). +For more information on adding secret to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). 
diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/add-a-sidecar/add-a-sidecar.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md similarity index 98% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/add-a-sidecar/add-a-sidecar.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md index eda77f05ab1..1f4b8d05a3b 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/add-a-sidecar/add-a-sidecar.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md @@ -27,7 +27,7 @@ A _sidecar_ is a container that extends or enhances the main container in a pod. 1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. -1. Set the remaining options. You can read about them in [Deploying Workloads](../deploy-workloads). +1. Set the remaining options. You can read about them in [Deploying Workloads](deploy-workloads.md). 1. Click **Launch**. 
diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/deploy-workloads/deploy-workloads.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md similarity index 81% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/deploy-workloads/deploy-workloads.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md index 249bd6e59d9..e4afb406df0 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/deploy-workloads/deploy-workloads.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md @@ -15,25 +15,25 @@ Deploy a workload to run an application in one or more containers. 1. Enter a **Name** for the workload. -1. Select a [workload type]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** +1. Select a [workload type](../../../../pages-for-subheaders/workloads-and-pods.md). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** 1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. 1. 
Either select an existing namespace, or click **Add to a new namespace** and enter a new namespace. -1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster . For more information, see [Services]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/#services). +1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster . For more information, see [Services](../../../../pages-for-subheaders/workloads-and-pods.md#services). 1. Configure the remaining options: - **Environment Variables** - Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/). + Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap](../configmaps.md). - **Node Scheduling** - **Health Check** - **Volumes** - Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/). + Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap](../configmaps.md). When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. This option is available in the UI as of Rancher v2.2.0. 
@@ -45,7 +45,7 @@ Deploy a workload to run an application in one or more containers. > >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. > - >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes). + >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster](../../kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) or [Creating a Custom Cluster](../../../../pages-for-subheaders/use-existing-nodes.md). 1. 
Click **Show Advanced Options** and configure: diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/rollback-workloads/rollback-workloads.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md similarity index 100% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/rollback-workloads/rollback-workloads.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/upgrade-workloads/upgrade-workloads.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md similarity index 100% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/upgrade-workloads/upgrade-workloads.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/discover-services/discover-services.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md similarity index 90% rename from versioned_docs/version-2.0-2.4/v1.6-migration/discover-services/discover-services.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md index 426bf504021..6b3b7feeede 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/discover-services/discover-services.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md @@ -11,7 +11,7 @@ This document will also show you how to link the workloads and services that you
Resolve the output.txt Link Directive
-![Resolve Link Directive]({{}}/img/rancher/resolve-links.png) +![Resolve Link Directive](/img/resolve-links.png) ## In This Document @@ -62,11 +62,11 @@ Pods can also be resolved using the `hostname` and `subdomain` fields if set in When you migrate v1.6 services to v2.x, Rancher does not automatically create a Kubernetes service record for each migrated deployment. Instead, you'll have to link the deployment and service together manually, using any of the methods listed below. -In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files) are linked together. +In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing](migrate-services.md#migration-example-file-output) our [migration example services](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files) are linked together.
Linked Workload and Kubernetes Service
-![Linked Workload and Kubernetes Service]({{}}/img/rancher/linked-service-workload.png) +![Linked Workload and Kubernetes Service](/img/linked-service-workload.png) ### Service Name Alias Creation @@ -78,7 +78,7 @@ Using the v2.x UI, use the context menu to navigate to the `Project` view. Then Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods.
Add Service Discovery Record
-![Add Service Discovery Record]({{}}/img/rancher/add-record.png) +![Add Service Discovery Record](/img/add-record.png) The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. @@ -91,4 +91,4 @@ Pointing to another workload | | ✓ Create alias for another DNS record | | ✓ -### [Next: Load Balancing]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/) +### [Next: Load Balancing](load-balancing.md) diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/expose-services/expose-services.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md similarity index 95% rename from versioned_docs/version-2.0-2.4/v1.6-migration/expose-services/expose-services.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md index f2bdd089093..2901cbeb91b 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/expose-services/expose-services.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md @@ -11,7 +11,7 @@ Use this document to correct workloads that list `ports` in `output.txt`. You ca
Resolve ports for the web Workload
-![Resolve Ports]({{}}/img/rancher/resolve-ports.png) +![Resolve Ports](/img/resolve-ports.png) ## In This Document @@ -40,7 +40,7 @@ A _HostPort_ is a port exposed to the public on a _specific node_ running one or In the following diagram, a user is trying to access an instance of Nginx, which is running within a pod on port 80. However, the Nginx deployment is assigned a HostPort of 9890. The user can connect to this pod by browsing to its host IP address, followed by the HostPort in use (9890 in case). -![HostPort Diagram]({{}}/img/rancher/hostPort.svg) +![HostPort Diagram](/img/hostPort.svg) #### HostPort Pros @@ -63,7 +63,7 @@ For example, for the web-deployment.yml file parsed from v1.6 that we've been us
Port Mapping: Setting HostPort
-{{< img "/img/rancher/set-hostport.gif" "Set HostPort">}} +![](/img/set-hostport.gif) ## NodePort @@ -73,7 +73,7 @@ NodePorts help you circumvent an IP address shortcoming. Although pods can be re In the following diagram, a user is trying to connect to an instance of Nginx running in a Kubernetes cluster managed by Rancher. Although he knows what NodePort Nginx is operating on (30216 in this case), he does not know the IP address of the specific node that the pod is running on. However, with NodePort enabled, he can connect to the pod using the IP address for _any_ node in the cluster. Kubeproxy will forward the request to the correct node and pod. -![NodePort Diagram]({{}}/img/rancher/nodePort.svg) +![NodePort Diagram](/img/nodePort.svg) NodePorts are available within your Kubernetes cluster on an internal IP. If you want to expose pods external to the cluster, use NodePorts in conjunction with an external load balancer. Traffic requests from outside your cluster for `:` are directed to the workload. The `` can be the IP address of any node in your Kubernetes cluster. @@ -101,6 +101,6 @@ For example, for the `web-deployment.yml` file parsed from v1.6 that we've been
Port Mapping: Setting NodePort
-{{< img "/img/rancher/set-nodeport.gif" "Set NodePort" >}} +![](/img/set-nodeport.gif) -### [Next: Configure Health Checks]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps) +### [Next: Configure Health Checks](monitor-apps.md) diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/get-started/get-started.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md similarity index 67% rename from versioned_docs/version-2.0-2.4/v1.6-migration/get-started/get-started.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md index 6a0e7714a95..745752f249a 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/get-started/get-started.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md @@ -24,31 +24,31 @@ The first step in migrating from v1.6 to v2.x is to install the Rancher v2.x Ser New for v2.x, all communication to Rancher Server is encrypted. The procedures below instruct you not only on installation of Rancher, but also creation and installation of these certificates. -Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). +Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements](../../../pages-for-subheaders/installation-requirements.md). After provisioning your node(s), install Rancher: -- [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/single-node) +- [Docker Install](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md) For development environments, Rancher can be installed on a single node using Docker. This installation procedure deploys a single Rancher container to your host. 
-- [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) +- [Kubernetes Install](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability Kubernetes installation. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. - >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) for full requirements. + >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) for full requirements. ## B. Configure Authentication -After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication). +After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication](../../../pages-for-subheaders/about-authentication.md).
Rancher v2.x Authentication
-![Rancher v2.x Authentication]({{}}/img/rancher/auth-providers.svg) +![Rancher v2.x Authentication](/img/auth-providers.svg) ### Local Users -Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) and assign them access rights. +Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts](../../../pages-for-subheaders/about-authentication.md) and assign them access rights. As a best practice, you should use a hybrid of external _and_ local authentication. This practice provides access to Rancher should your external authentication experience an interruption, as you can still log in using a local user account. Set up a few local accounts as administrative users of Rancher. @@ -63,7 +63,7 @@ Begin work in Rancher v2.x by using it to provision a new Kubernetes cluster, wh A cluster and project in combined together in Rancher v2.x is equivalent to a v1.6 environment. A _cluster_ is the compute boundary (i.e., your hosts) and a _project_ is an administrative boundary (i.e., a grouping of namespaces used to assign access rights to users). -There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). 
+There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). ### Clusters @@ -71,32 +71,32 @@ In Rancher v1.6, compute nodes were added to an _environment_. Rancher v2.x esch Rancher v2.x lets you launch a Kubernetes cluster anywhere. Host your cluster using: -- A [hosted Kubernetes provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/). -- A [pool of nodes from an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/). Rancher launches Kubernetes on the nodes. -- Any [custom node(s)]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. +- A [hosted Kubernetes provider](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md). +- A [pool of nodes from an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). Rancher launches Kubernetes on the nodes. +- Any [custom node(s)](../../../pages-for-subheaders/use-existing-nodes.md). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. ### Projects -Additionally, Rancher v2.x introduces [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. 
+Additionally, Rancher v2.x introduces [projects](../../advanced-user-guides/manage-clusters/projects-and-namespaces.md), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. When you create a cluster, two projects are automatically created: - The `System` project, which includes system namespaces where important Kubernetes resources are running (like ingress controllers and cluster dns services) - The `Default` project. -However, for production environments, we recommend [creating your own project]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/#creating-projects) and giving it a descriptive name. +However, for production environments, we recommend [creating your own project](../../advanced-user-guides/manage-clusters/projects-and-namespaces.md#creating-projects) and giving it a descriptive name. -After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/editing-projects/). By assigning users to projects, you can limit what applications and resources a user can access. +After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects](../../advanced-user-guides/manage-projects/add-users-to-projects.md). By assigning users to projects, you can limit what applications and resources a user can access. ## D. Create Stacks -In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. 
In v2.x, you need to [create namespaces]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/), which are the v2.x equivalent of stacks, for the same purpose. +In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. In v2.x, you need to [create namespaces](../../advanced-user-guides/manage-clusters/projects-and-namespaces.md), which are the v2.x equivalent of stacks, for the same purpose. In Rancher v2.x, namespaces are child objects to projects. When you create a project, a `default` namespace is added to the project, but you can create your own to parallel your stacks from v1.6. During migration, if you don't explicitly define which namespace a service should be deployed to, it's deployed to the `default` namespace. -Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services) soon). +Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery](discover-services.md) soon). 
-### [Next: Migrate Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool) +### [Next: Migrate Your Services](migrate-services.md) diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/kub-intro/kub-intro.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md similarity index 97% rename from versioned_docs/version-2.0-2.4/v1.6-migration/kub-intro/kub-intro.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md index bf88c6a2b94..a7d4d7e47a6 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/kub-intro/kub-intro.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md @@ -38,4 +38,4 @@ Because Rancher v1.6 defaulted to our Cattle container orchestrator, it primaril More detailed information on Kubernetes concepts can be found in the [Kubernetes Concepts Documentation](https://kubernetes.io/docs/concepts/). -### [Next: Get Started]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/get-started/) +### [Next: Get Started](install-and-configure-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/load-balancing/load-balancing.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md similarity index 87% rename from versioned_docs/version-2.0-2.4/v1.6-migration/load-balancing/load-balancing.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md index e740ca3d4a7..4af7090515c 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/load-balancing/load-balancing.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md @@ -13,7 +13,7 @@ If you encounter the `output.txt` text below after parsing your v1.6 Compose fil
output.txt Load Balancer Directive
-![Resolve Load Balancer Directive]({{}}/img/rancher/resolve-load-balancer.png) +![Resolve Load Balancer Directive](/img/resolve-load-balancer.png) ## In This Document @@ -45,7 +45,7 @@ By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisione RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. -For more information NGINX Ingress Controller, their deployment as DaemonSets, deployment configuration options, see the [RKE documentation]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). +For more information NGINX Ingress Controller, their deployment as DaemonSets, deployment configuration options, see the [RKE documentation](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/). ## Load Balancing Architecture @@ -57,13 +57,13 @@ In Rancher v1.6 you could deploy a scalable load balancer service within your st
Rancher v1.6 Load Balancing Architecture
-![Rancher v1.6 Load Balancing]({{}}/img/rancher/cattle-load-balancer.svg) +![Rancher v1.6 Load Balancing](/img/cattle-load-balancer.svg) The Rancher v2.x Ingress Controller is a DaemonSet, it is globally deployed on all schedulable nodes to serve your entire Kubernetes Cluster. Therefore, when you program the Ingress rules, you must use a unique hostname and path to point to your workloads, as the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads.
Rancher v2.x Load Balancing Architecture
-![Rancher v2.x Load Balancing]({{}}/img/rancher/kubernetes-load-balancer.svg) +![Rancher v2.x Load Balancing](/img/kubernetes-load-balancer.svg) ## Ingress Caveats @@ -81,13 +81,13 @@ You can launch a new load balancer to replace your load balancer from v1.6. Usin >**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. > -![Workload Scale]({{}}/img/rancher/workload-scale.png) +![Workload Scale](/img/workload-scale.png) For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects.
Browsing to Load Balancer Tab and Adding Ingress
-![Adding Ingress]({{}}/img/rancher/add-ingress.gif) +![Adding Ingress](/img/add-ingress.gif) Similar to a service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. The sections below demonstrate how to create Ingress rules. @@ -99,13 +99,13 @@ For example, let's say you have multiple workloads deployed to a single namespac
Ingress: Path-Based Routing Configuration
-![Ingress: Path-Based Routing Configuration]({{}}/img/rancher/add-ingress-form.png) +![Ingress: Path-Based Routing Configuration](/img/add-ingress-form.png) Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address.
Workload Links
-![Load Balancer Links to Workloads]({{}}/img/rancher/load-balancer-links.png) +![Load Balancer Links to Workloads](/img/load-balancer-links.png) The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use `kubectl` command below to see the Ingress address assigned by the controller: @@ -117,24 +117,24 @@ kubectl get ingress Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want to use it, you need to use a valid SSL/TLS certificate. While configuring Ingress rules, use the **SSL/TLS Certificates** section to configure a certificate. -- We recommend [uploading a certificate]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. -- If you have configured [NGINX default certificate]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. +- We recommend [uploading a certificate](../kubernetes-resources-setup/encrypt-http-communication.md) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. +- If you have configured [NGINX default certificate](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**.
Load Balancer Configuration: SSL/TLS Certificate Section
-![SSL/TLS Certificates Section]({{}}/img/rancher/load-balancer-ssl-certs.png) +![SSL/TLS Certificates Section](/img/load-balancer-ssl-certs.png) ### TCP Load Balancing Options #### Layer-4 Load Balancer -For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. +For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`.
Workload Deployment: Layer 4 Load Balancer Creation
-![Deploy Layer-4 Load Balancer]({{}}/img/rancher/deploy-workload-load-balancer.png) +![Deploy Layer-4 Load Balancer](/img/deploy-workload-load-balancer.png) Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. @@ -146,7 +146,7 @@ However, there is a workaround to use NGINX's TCP balancing by creating a Kubern To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. -![Layer-4 Load Balancer: ConfigMap Workaround]({{}}/img/rancher/layer-4-lb-config-map.png) +![Layer-4 Load Balancer: ConfigMap Workaround](/img/layer-4-lb-config-map.png) The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. 
diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/run-migration-tool/run-migration-tool.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md similarity index 87% rename from versioned_docs/version-2.0-2.4/v1.6-migration/run-migration-tool/run-migration-tool.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md index 79c33081eba..5003c4eecdd 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/run-migration-tool/run-migration-tool.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md @@ -67,7 +67,7 @@ Next, use the migration-tools CLI to export all stacks in all of the Cattle envi **Step Result:** migration-tools exports Compose files (`docker-compose.yml` and `rancher-compose.yml`) for each stack in the `--export-dir` directory. If you omitted this option, Compose files are output to your current directory. - A unique directory is created for each environment and stack. For example, if we export each [environment/stack]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files) from Rancher v1.6, the following directory structure is created: + A unique directory is created for each environment and stack. For example, if we export each [environment/stack](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files) from Rancher v1.6, the following directory structure is created: ``` export/ # migration-tools --export-dir @@ -90,7 +90,7 @@ Next, use the migration-tools CLI to export all stacks in all of the Cattle envi >**Note:** If you omit the `--docker-file` and `--rancher-file` options from your command, migration-tools uses the current working directory to find Compose files. 
->**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/). +>**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference](../../../reference-guides/v1.6-migration/migration-tools-cli-reference.md). ### migration-tools CLI Output @@ -109,7 +109,7 @@ When a you export a service from Rancher v1.6 that exposes public ports, migrati #### Migration Example File Output -If we parse the two example files from [Migration Example Files]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: +If we parse the two example files from [Migration Example Files](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: File | Description -----|------------ @@ -247,12 +247,12 @@ You can deploy the Kubernetes manifests created by migration-tools by importing
Deploy Services: Import Kubernetes Manifest
-![Deploy Services]({{}}/img/rancher/deploy-service.gif) +![Deploy Services](/img/deploy-service.gif)
->**Prerequisite:** [Install Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli/) for Rancher v2.x. +>**Prerequisite:** [Install Rancher CLI](../../../pages-for-subheaders/cli-with-rancher.md) for Rancher v2.x. Use the following Rancher CLI commands to deploy your application using Rancher v2.x. For each Kubernetes manifest output by migration-tools CLI, enter one of the commands below to import it into Rancher v2.x. @@ -269,7 +269,7 @@ Following importation, you can view your v1.6 services in the v2.x UI as Kuberne
Imported Services
-![Imported Services]({{}}/img/rancher/imported-workloads.png) +![Imported Services](/img/imported-workloads.png) ## What Now? @@ -277,15 +277,15 @@ Although the migration-tool CLI parses your Rancher v1.6 Compose files to Kubern
Edit Migrated Services
-![Edit Migrated Workload]({{}}/img/rancher/edit-migration-workload.gif) +![Edit Migrated Workload](/img/edit-migration-workload.gif) As mentioned in [Migration Tools CLI Output](#migration-tools-cli-output), the `output.txt` files generated during parsing lists the manual steps you must make for each deployment. Review the upcoming topics for more information on manually editing your Kubernetes specs. -Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, migration-tools CLI output a manifest for each workload that it creates for Kubernetes. For example, our when our [Migration Example Files]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workloads). Each workload features a list of action items to restore operations for the workload in v2.x. +Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, migration-tools CLI output a manifest for each workload that it creates for Kubernetes. For example, when our [Migration Example Files](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workloads). Each workload features a list of action items to restore operations for the workload in v2.x.
Output.txt Example
-![output.txt]({{}}/img/rancher/output-dot-text.png) +![output.txt](/img/output-dot-text.png) The following table lists possible directives that may appear in `output.txt`, what they mean, and links on how to resolve them. @@ -298,16 +298,16 @@ Directive | Instructions [scale][5] | In v1.6, scale refers to the number of container replicas running on a single node. In v2.x, this feature is replaced by replica sets. start_on_create | No Kubernetes equivalent. No action is required from you. -[1]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x -[2]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[3]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services -[4]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/expose-services -[5]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node +[1]:v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x +[2]:v1.6-migration/schedule-workloads/#scheduling-using-labels +[3]:v1.6-migration/discover-services +[4]:v1.6-migration/expose-services +[5]:v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node -[7]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[8]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-global-services -[9]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#label-affinity-antiaffinity +[7]:v1.6-migration/schedule-workloads/#scheduling-using-labels +[8]:v1.6-migration/schedule-workloads/#scheduling-global-services +[9]:v1.6-migration/schedule-workloads/#label-affinity-antiaffinity -### [Next: Expose Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/) +### [Next: Expose Your Services](expose-services.md) diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/monitor-apps/monitor-apps.md 
b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md similarity index 86% rename from versioned_docs/version-2.0-2.4/v1.6-migration/monitor-apps/monitor-apps.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md index ca85bf0e1f1..e1f163f5123 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/monitor-apps/monitor-apps.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md @@ -18,7 +18,7 @@ For example, for the image below, we would configure liveness probes for the `we
Resolve health_check for the web and webLB Workloads
-![Resolve health_check]({{}}/img/rancher/resolve-health-checks.png) +![Resolve health_check](/img/resolve-health-checks.png) ## In This Document @@ -46,7 +46,7 @@ The health check microservice features two types of health checks, which have a The following diagram displays the health check microservice evaluating a container running Nginx. Notice that the microservice is making its check across nodes. -![Rancher v1.6 Health Checks]({{}}/img/rancher/healthcheck.svg) +![Rancher v1.6 Health Checks](/img/healthcheck.svg) ## Rancher v2.x Health Checks @@ -74,11 +74,11 @@ Kubernetes includes two different _types_ of probes: liveness checks and readine The following diagram displays kubelets running probes on containers they are monitoring ([kubelets](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) are the primary "agent" running on each node). The node on the left is running a liveness probe, while the one of the right is running a readiness check. Notice that the kubelet is scanning containers on its host node rather than across nodes, as in Rancher v1.6. -![Rancher v2.x Probes]({{}}/img/rancher/probes.svg) +![Rancher v2.x Probes](/img/probes.svg) ## Configuring Probes in Rancher v2.x -The [migration-tool CLI]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files to Kubernetes manifest. Therefore, if want you to add health checks to your Rancher v2.x workloads, you'll have to add them manually. +The [migration-tool CLI](migrate-services.md) cannot parse health checks from Compose files to Kubernetes manifest. Therefore, if you want to add health checks to your Rancher v2.x workloads, you'll have to add them manually. Using the Rancher v2.x UI, you can add TCP or HTTP health checks to Kubernetes workloads. By default, Rancher asks you to configure a readiness check for your workloads and applies a liveness check using the same configuration.
Optionally, you can define a separate liveness check. @@ -88,7 +88,7 @@ Configure probes by using the **Health Check** section while editing deployments
Edit Deployment: Health Check Section
-![Health Check Section]({{}}/img/rancher/health-check-section.png) +![Health Check Section](/img/health-check-section.png) ### Configuring Checks @@ -99,9 +99,9 @@ While you create a workload using Rancher v2.x, we recommend configuring a check TCP checks monitor your deployment's health by attempting to open a connection to the pod over a specified port. If the probe can open the port, it's considered healthy. Failure to open it is considered unhealthy, which notifies Kubernetes that it should kill the pod and then replace it according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). -You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). +You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). -![TCP Check]({{}}/img/rancher/readiness-check-tcp.png) +![TCP Check](/img/readiness-check-tcp.png) When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. 
@@ -137,9 +137,9 @@ When you configure a readiness check using Rancher v2.x, the `readinessProbe` di HTTP checks monitor your deployment's health by sending an HTTP GET request to a specific URL path that you define. If the pod responds with a message range of `200`-`400`, the health check is considered successful. If the pod replies with any other value, the check is considered unsuccessful, so Kubernetes kills and replaces the pod according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). -You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). +You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). -![HTTP Check]({{}}/img/rancher/readiness-check-http.png) +![HTTP Check](/img/readiness-check-http.png) When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. 
@@ -150,15 +150,15 @@ When you configure a readiness check using Rancher v2.x, the `readinessProbe` di While configuring a readiness check for either the TCP or HTTP protocol, you can configure a separate liveness check by clicking the **Define a separate liveness check**. For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). -![Separate Liveness Check]({{}}/img/rancher/separate-check.png) +![Separate Liveness Check](/img/separate-check.png) ### Additional Probing Options Rancher v2.x, like v1.6, lets you perform health checks using the TCP and HTTP protocols. However, Rancher v2.x also lets you check the health of a pod by running a command inside of it. If the container exits with a code of `0` after running the command, the pod is considered healthy. -You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). +You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). 
-![Healthcheck Execute Command]({{}}/img/rancher/healthcheck-cmd-exec.png) +![Healthcheck Execute Command](/img/healthcheck-cmd-exec.png) #### Health Check Parameter Mappings @@ -174,4 +174,4 @@ Rancher v1.6 Compose Parameter | Rancher v2.x Kubernetes Parameter `initializing_timeout` | `initialDelaySeconds` `strategy` | `restartPolicy` -### [Next: Schedule Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/) +### [Next: Schedule Your Services](schedule-services.md) diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/schedule-workloads/schedule-workloads.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md similarity index 91% rename from versioned_docs/version-2.0-2.4/v1.6-migration/schedule-workloads/schedule-workloads.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md index b993d1eec83..965a85165e8 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/schedule-workloads/schedule-workloads.md +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md @@ -15,7 +15,7 @@ You can schedule your migrated v1.6 services while editing a deployment. Schedul
Editing Workloads: Workload Type and Node Scheduling Sections
-![Workload Type and Node Scheduling Sections]({{}}/img/rancher/migrate-schedule-workloads.png) +![Workload Type and Node Scheduling Sections](/img/migrate-schedule-workloads.png) ## In This Document @@ -41,7 +41,7 @@ Rancher v2.x retains _all_ methods available in v1.6 for scheduling your service In v1.6, you would schedule a service to a host while adding a service to a Stack. In Rancher v2.x., the equivalent action is to schedule a workload for deployment. The following composite image shows a comparison of the UI used for scheduling in Rancher v2.x versus v1.6. -![Node Scheduling: Rancher v2.x vs v1.6]({{}}/img/rancher/node-scheduling.png) +![Node Scheduling: Rancher v2.x vs v1.6](/img/node-scheduling.png) ## Node Scheduling Options @@ -49,7 +49,7 @@ Rancher offers a variety of options when scheduling nodes to host workload pods You can choose a scheduling option as you deploy a workload. The term _workload_ is synonymous with adding a service to a Stack in Rancher v1.6). You can deploy a workload by using the context menu to browse to a cluster project (` > > Workloads`). -The sections that follow provide information on using each scheduling options, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). +The sections that follow provide information on using each scheduling options, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). Option | v1.6 Feature | v2.x Feature -------|------|------ @@ -66,13 +66,13 @@ Option | v1.6 Feature | v2.x Feature In v1.6, you could control the number of container replicas deployed for a service. 
You can schedule pods the same way in v2.x, but you'll have to set the scale manually while editing a workload. -![Resolve Scale]({{}}/img/rancher/resolve-scale.png) +![Resolve Scale](/img/resolve-scale.png) During migration, you can resolve `scale` entries in `output.txt` by setting a value for the **Workload Type** option **Scalable deployment** depicted below.
Scalable Deployment Option
-![Workload Scale]({{}}/img/rancher/workload-type-option.png) +![Workload Scale](/img/workload-type-option.png) ### Scheduling Pods to a Specific Node @@ -83,7 +83,7 @@ As you deploy a workload, use the **Node Scheduling** section to choose a node t
Rancher v2.x: Workload Deployment
-![Workload Tab and Group by Node Icon]({{}}/img/rancher/schedule-specific-node.png) +![Workload Tab and Group by Node Icon](/img/schedule-specific-node.png) Rancher schedules pods to the node you select if 1) there are compute resource available for the node and 2) you've configured port mapping to use the HostPort option, that there are no port conflicts. @@ -91,7 +91,7 @@ If you expose the workload using a NodePort that conflicts with another workload After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. -![Pods Scheduled to Same Node]({{}}/img/rancher/scheduled-nodes.png) +![Pods Scheduled to Same Node](/img/scheduled-nodes.png) ). A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. @@ -219,7 +219,7 @@ To create a daemonset while configuring a workload, choose **Run one pod on each
Workload Configuration: Choose run one pod on each node to configure daemonset
-![choose Run one pod on each node]({{}}/img/rancher/workload-type.png) +![choose Run one pod on each node](/img/workload-type.png) ### Scheduling Pods Using Resource Constraints @@ -242,8 +242,8 @@ To declare resource constraints, edit your migrated workloads, editing the **Sec
Scheduling: Resource Constraint Settings
-![Resource Constraint Settings]({{}}/img/rancher/resource-constraint-settings.png) +![Resource Constraint Settings](/img/resource-constraint-settings.png) You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). -### [Next: Service Discovery]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/) +### [Next: Service Discovery](discover-services.md) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/rke-add-on.md b/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/rke-add-on.md deleted file mode 100644 index a6989a9fe0b..00000000000 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/rke-add-on.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: RKE Add-On Install -weight: 276 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
- - -* [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb) -* [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb) -* [HTTP Proxy Configuration for a Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/proxy/) -* [Troubleshooting RKE Add-on Installs]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md b/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md index 1b258db1b9e..0cb9f6d5edb 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md +++ b/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md @@ -3,4 +3,4 @@ title: Rancher Helm Chart Options weight: 50 --- -The Rancher Helm chart options reference moved to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/) \ No newline at end of file +The Rancher Helm chart options reference moved to [this page.](../../../reference-guides/installation-references/helm-chart-options.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infrastructure-tutorials.md b/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infrastructure-tutorials.md deleted file mode 100644 index 22a86b8d515..00000000000 --- a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/infrastructure-tutorials/infrastructure-tutorials.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. 
-shortTitle: Infrastructure Tutorials -weight: 5 ---- - -To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - - -To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/resources.md b/versioned_docs/version-2.0-2.4/installation/resources/resources.md deleted file mode 100644 index bfdf93ba6bb..00000000000 --- a/versioned_docs/version-2.0-2.4/installation/resources/resources.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Resources -weight: 5 -aliases: -- /rancher/v2.0-v2.4/en/installation/options ---- - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Advanced Options - -When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. 
Learn more about these options: - -| Advanced Option | Available as of | -| ----------------------------------------------------------------------------------------------------------------------- | --------------- | -| [Custom CA Certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/custom-ca-root-certificate/) | v2.0.0 | -| [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/options/api-audit-log/) | v2.0.0 | -| [TLS Settings]({{}}/rancher/v2.0-v2.4/en/installation/options/tls-settings/) | v2.1.7 | -| [etcd configuration]({{}}/rancher/v2.0-v2.4/en/installation/options/etcd/) | v2.2.0 | -| [Local System Charts for Air Gap Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts) | v2.3.0 | diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/k8s-in-rancher.md b/versioned_docs/version-2.0-2.4/k8s-in-rancher/k8s-in-rancher.md deleted file mode 100644 index eeaab9d0ca3..00000000000 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/k8s-in-rancher.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kubernetes Resources -weight: 19 -aliases: - - /rancher/v2.0-v2.4/en/concepts/ - - /rancher/v2.0-v2.4/en/tasks/ - - /rancher/v2.0-v2.4/en/concepts/resources/ ---- - -## Workloads - -Deploy applications to your cluster nodes using [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. - -When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. - -Following a workload deployment, you can continue working with it. 
You can: - -- [Upgrade]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. -- [Roll back]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. -- [Add a sidecar]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. - -## Load Balancing and Ingress - -### Load Balancers - -After you launch an application, it's only available within the cluster. It can't be reached externally. - -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -#### Ingress - -Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. You can get around this issue by using an ingress. - -Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. 
When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. - -For more information, see [Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress). - -When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. - -For more information, see [Global DNS]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). - -## Service Discovery - -After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. - -For more information, see [Service Discovery]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery). - -## Pipelines - -After your project has been [configured to a version control provider]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines/#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. - -For more information, see [Pipelines]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/). - -## Applications - -Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. - -For more information, see [Applications in a Project]({{}}/rancher/v2.0-v2.4/en/catalog/apps/). - -## Kubernetes Resources - -Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. 
However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. - -Resources include: - -- [Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. -- [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. -- [Secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. -- [Registries]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/authentication.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md similarity index 65% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/authentication.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md index a53da74f6cf..1a6d9f50f4b 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/authentication.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md @@ -16,30 +16,30 @@ The Rancher authentication proxy integrates with the following external authenti | Auth Service | Available as of | | ------------------------------------------------------------------------------------------------ | ---------------- | -| [Microsoft Active Directory]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/) | v2.0.0 | -| [GitHub]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/github/) | v2.0.0 | -| [Microsoft Azure 
AD]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/azure-ad/) | v2.0.3 | -| [FreeIPA]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/freeipa/) | v2.0.5 | -| [OpenLDAP]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/) | v2.0.5 | -| [Microsoft AD FS]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/) | v2.0.7 | -| [PingIdentity]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ping-federate/) | v2.0.7 | -| [Keycloak]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/keycloak/) | v2.1.0 | -| [Okta]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/okta/) | v2.2.0 | -| [Google OAuth]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/google/) | v2.3.0 | -| [Shibboleth]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth) | v2.4.0 | +| [Microsoft Active Directory](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md) | v2.0.0 | +| [GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md) | v2.0.0 | +| [Microsoft Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md) | v2.0.3 | +| [FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md) | v2.0.5 | +| [OpenLDAP](configure-openldap.md) | v2.0.5 | +| [Microsoft AD FS](configure-microsoft-ad-federation-service-saml.md) | v2.0.7 | +| [PingIdentity](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md) | v2.0.7 | +| 
[Keycloak](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md) | v2.1.0 | +| [Okta](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md) | v2.2.0 | +| [Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md) | v2.3.0 | +| [Shibboleth](configure-shibboleth-saml.md) | v2.4.0 |
-However, Rancher also provides [local authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/local/). +However, Rancher also provides [local authentication](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md). In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. ## Users and Groups -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/). +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. 
For more information on roles and permissions, see [Role Based Access Control](manage-role-based-access-control-rbac.md). > **Note:** Local authentication does not support creating or managing groups. -For more information, see [Users and Groups]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/) +For more information, see [Users and Groups](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md) ## Scope of Rancher Authorization @@ -76,22 +76,22 @@ Configuration of external authentication affects how principal users are managed 1. Sign into Rancher as the local principal and complete configuration of external authentication. - ![Sign In]({{}}/img/rancher/sign-in.png) + ![Sign In](/img/sign-in.png) 2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. - ![Principal ID Sharing]({{}}/img/rancher/principal-ID.png) + ![Principal ID Sharing](/img/principal-ID.png) 3. After you complete configuration, Rancher automatically signs out the local principal. - ![Sign Out Local Principal]({{}}/img/rancher/sign-out-local.png) + ![Sign Out Local Principal](/img/sign-out-local.png) 4. Then, Rancher automatically signs you back in as the external principal. - ![Sign In External Principal]({{}}/img/rancher/sign-in-external.png) + ![Sign In External Principal](/img/sign-in-external.png) 5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. - ![Sign In External Principal]({{}}/img/rancher/users-page.png) + ![Sign In External Principal](/img/users-page.png) 6. The external principal and the local principal share the same access rights. 
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md new file mode 100644 index 00000000000..d9f71e5a3b0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md @@ -0,0 +1,46 @@ +--- +title: Provisioning Drivers +weight: 1140 +--- + +Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. + +### Rancher Drivers + +With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. + +There are two types of drivers within Rancher: + +* [Cluster Drivers](#cluster-drivers) +* [Node Drivers](#node-drivers) + +### Cluster Drivers + +_Available as of v2.2.0_ + +Cluster drivers are used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md), such as GKE, EKS, AKS, etc. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. 
+ +By default, Rancher has activated several hosted Kubernetes cloud providers including: + +* [Amazon EKS](../reference-guides/installation-references/amazon-eks-permissions.md) +* [Google GKE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +* [Azure AKS](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) + +There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: + +* [Alibaba ACK](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +* [Huawei CCE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) +* [Tencent](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) + +### Node Drivers + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. 
+ +Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: + +* [Amazon EC2](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) +* [Azure](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md) +* [Digital Ocean](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md) +* [vSphere](vsphere.md) diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/rke-templates.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md similarity index 53% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/rke-templates.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md index d137d68954d..00998ad99a5 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/rke-templates.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md @@ -7,7 +7,7 @@ _Available as of Rancher v2.3.0_ RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. -RKE is the [Rancher Kubernetes Engine,]({{}}/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. +RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. 
Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. @@ -17,7 +17,7 @@ Admins control which cluster options can be changed by end users. RKE templates If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. The core features of RKE templates allow DevOps and security teams to: @@ -48,24 +48,24 @@ The [add-on section](#add-ons) of an RKE template is especially powerful because RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. -RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware). +RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. 
For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. -As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template), and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. 
+As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#updating-a-template), and the cluster is upgraded to [use a newer version of the template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. # Example Scenarios When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. -These [example scenarios]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios) describe how an organization could use templates to standardize cluster creation. +These [example scenarios](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md) describe how an organization could use templates to standardize cluster creation. Some of the example scenarios include the following: -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. 
-- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#allowing-other-users-to-control-and-share-a-template) +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. 
+- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) of the template. 
+- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#allowing-other-users-to-control-and-share-a-template) # Template Management @@ -81,40 +81,40 @@ For the settings that cannot be overridden, the end user will not be able to dir The documents in this section explain the details of RKE template management: -- [Getting permission to create templates]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/) -- [Creating and revising templates]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/) -- [Enforcing template settings](./enforcement/#requiring-new-clusters-to-use-an-rke-template) -- [Overriding template settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/overrides/) -- [Sharing templates with cluster creators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users-or-groups) -- [Sharing ownership of a template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) +- [Getting permission to create templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md) +- [Creating and revising templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md) +- [Enforcing template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) +- [Overriding template 
settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md) +- [Sharing templates with cluster creators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) +- [Sharing ownership of a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-ownership-of-templates) -An [example YAML configuration file for a template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-yaml) is provided for reference. +An [example YAML configuration file for a template](../reference-guides/rke1-template-example-yaml.md) is provided for reference. # Applying Templates -You can [create a cluster from a template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) +You can [create a cluster from a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md) -If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) +If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that 
revision.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#updating-a-cluster-created-with-an-rke-template) RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. # Standardizing Hardware -RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware). +RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). # YAML Customization -If you define an RKE template as a YAML file, you can modify this [example RKE template YAML]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-yaml). 
The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. +If you define an RKE template as a YAML file, you can modify this [example RKE template YAML](../reference-guides/rke1-template-example-yaml.md). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. -The RKE documentation also has [annotated]({{}}/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. +The RKE documentation also has [annotated](https://rancher.com/docs/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. -For guidance on available options, refer to the RKE documentation on [cluster configuration.]({{}}/rke/latest/en/config-options/) +For guidance on available options, refer to the RKE documentation on [cluster configuration.](https://rancher.com/docs/rke/latest/en/config-options/) ### Add-ons -The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file]({{}}/rke/latest/en/config-options/add-ons/). +The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file](https://rancher.com/docs/rke/latest/en/config-options/add-ons/). The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. 
@@ -124,4 +124,4 @@ Some things you could do with add-ons include: - Install plugins on nodes that are deployed with a Kubernetes daemonset - Automatically set up namespaces, service accounts, or role binding -The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) +The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/versioned_docs/version-2.0-2.4/api/api.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md similarity index 87% rename from versioned_docs/version-2.0-2.4/api/api.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md index ac4d369d0de..99a8a889c80 100644 --- a/versioned_docs/version-2.0-2.4/api/api.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md @@ -5,13 +5,13 @@ weight: 24 ## How to use the API -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. 
Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). +The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys](../reference-guides/user-settings/api-keys.md). ## Authentication -API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. +API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys](../reference-guides/user-settings/api-keys.md). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.0-v2.4/en/api/api-tokens). +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). 
In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page](../reference-guides/about-the-api/api-tokens.md). ## Making requests diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/cluster-access.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md similarity index 55% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/cluster-access.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md index 1b979a5af4b..105c41ce922 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-access/cluster-access.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md @@ -5,11 +5,11 @@ weight: 1 This section is about what tools can be used to access clusters managed by Rancher. -For information on how to give users permission to access a cluster, see the section on [adding users to clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/) +For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) -For more information on roles-based access control, see [this section.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/) +For more information on roles-based access control, see [this section.](manage-role-based-access-control-rbac.md) -For information on how to set up an authentication system, see [this section.]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) +For information on how to set up an authentication system, see [this section.](about-authentication.md) ### Rancher UI @@ -18,15 +18,15 @@ Rancher provides an intuitive user interface for interacting with your clusters. 
### kubectl -You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: +You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: -- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/kubectl/). -- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](./kubectl/). +- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). +- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). ### Rancher CLI -You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli/).
This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. +You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI](cli-with-rancher.md). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. ### Rancher API -Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file +Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/advanced.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/advanced.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/air-gap-helm2.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gap-helm2.md similarity index 76% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/air-gap-helm2.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gap-helm2.md index 40243da33a7..48a529576fc 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/air-gap-helm2/air-gap-helm2.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gap-helm2.md @@ -37,9 +37,9 @@ Instead of running the Docker installation, you have the option to follow the Ku # Installation Outline -- [1. Prepare your Node(s)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) -- [2. Collect and Publish Images to your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) -- [3. Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -- [4. 
Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) +- [1. Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) +- [2. Collect and Publish Images to your Private Registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) +- [3. Launch a Kubernetes Cluster with RKE](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) +- [4. Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) -### [Next: Prepare your Node(s)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) +### [Next: Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/air-gap.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md similarity index 56% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/air-gap.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md index bdc2faf2390..cd5be4553d5 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/air-gap/air-gap.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -11,7 +11,7 @@ This section is about using the Helm CLI to install the Rancher server in an air The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. 
-For more information on each installation option, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/) +For more information on each installation option, refer to [this page.](installation-and-upgrade.md) Throughout the installation instructions, there will be _tabs_ for each installation option. @@ -19,13 +19,13 @@ Throughout the installation instructions, there will be _tabs_ for each installa # Installation Outline -1. [Set up infrastructure and private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) -2. [Collect and publish images to your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) -3. [Set up a Kubernetes cluster (Skip this step for Docker installations)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -4. [Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) +1. [Set up infrastructure and private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) +2. [Collect and publish images to your private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) +3. [Set up a Kubernetes cluster (Skip this step for Docker installations)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) +4. 
[Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) # Upgrades -To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/) +To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.](upgrades.md) -### [Next: Prepare your Node(s)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) +### [Next: Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/admin-settings/admin-settings.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md similarity index 65% rename from versioned_docs/version-2.0-2.4/admin-settings/admin-settings.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md index fb0557c9745..eb2d5802192 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/admin-settings.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -9,7 +9,7 @@ aliases: - /rancher/v2.0-v2.4/en/admin-settings/log-in/ --- -After installation, the [system administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers 
and global DNS entries. +After installation, the [system administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. ## First Log In @@ -21,25 +21,25 @@ After you log into Rancher for the first time, Rancher will prompt you for a **R One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. -For more information how authentication works and how to configure each provider, see [Authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/). +For more information how authentication works and how to configure each provider, see [Authentication](about-authentication.md). ## Authorization Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. -For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/). +For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)](manage-role-based-access-control-rbac.md). ## Pod Security Policies _Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. 
If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. -For more information how to create and use PSPs, see [Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). +For more information how to create and use PSPs, see [Pod Security Policies](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). ## Provisioning Drivers -Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. +Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. -For more information, see [Provisioning Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/). +For more information, see [Provisioning Drivers](about-provisioning-drivers.md). ## Adding Kubernetes Versions into Rancher @@ -47,14 +47,14 @@ _Available as of v2.3.0_ With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. -The information that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. 
For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/) +The information that Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md) -Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). +Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md). -For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/). +For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md). ## Enabling Experimental Features _Available as of v2.3.0_ -Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) +Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. 
For more information, refer to the section about [feature flags.](../getting-started/installation-and-upgrade/installation-references/feature-flags.md) diff --git a/versioned_docs/version-2.0-2.4/backups/backups.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md similarity index 79% rename from versioned_docs/version-2.0-2.4/backups/backups.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md index 13b9122b7d3..c4eda7c857c 100644 --- a/versioned_docs/version-2.0-2.4/backups/backups.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -7,6 +7,6 @@ This section is devoted to protecting your data in a disaster scenario. To protect yourself from a disaster scenario, you should create backups on a regular basis. -- [Backup](./backup) -- [Restore](./restore) +- [Backup](../backups/backup/backup.md) +- [Restore](../backups/restore/restore.md) diff --git a/versioned_docs/version-2.0-2.4/best-practices/best-practices.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md similarity index 95% rename from versioned_docs/version-2.0-2.4/best-practices/best-practices.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md index 6ea4f98b952..4bb14b91783 100644 --- a/versioned_docs/version-2.0-2.4/best-practices/best-practices.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md @@ -13,7 +13,7 @@ Use the navigation bar on the left to find the current best practices for managi For more guidance on best practices, you can consult these resources: -- [Security]({{}}/rancher/v2.0-v2.4/en/security/) +- [Security](rancher-security.md) - [Rancher Blog](https://rancher.com/blog/) - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) diff --git
a/versioned_docs/version-2.0-2.4/cluster-provisioning/production/production.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md similarity index 68% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/production/production.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md index ba2d48fdde9..c9e6c2cb5fb 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/production/production.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -5,19 +5,19 @@ weight: 2 In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. -For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements) +For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) This is a shortlist of best practices that we strongly recommend for all production clusters. -For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.0-v2.4/en/best-practices) +For a full list of all the best practices that we recommend, refer to the [best practices section.](best-practices.md) ### Node Requirements -* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/) including the port requirements. 
+* Make sure your nodes fulfill all of the [node requirements,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) including the port requirements. ### Back up etcd -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{}}/rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. +* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots](backups/v2.0.x-v2.4.x/backup/rke-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. ### Cluster Architecture @@ -30,10 +30,10 @@ For a full list of all the best practices that we recommend, refer to the [best * Assign two or more nodes the `controlplane` role for master component high availability. * Assign two or more nodes the `worker` role for workload rescheduling upon node failure. 
-For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles) +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md) For more information about the -number of nodes for each Kubernetes role, refer to the section on [recommended architecture.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/) +number of nodes for each Kubernetes role, refer to the section on [recommended architecture.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) ### Logging and Monitoring @@ -47,4 +47,4 @@ number of nodes for each Kubernetes role, refer to the section on [recommended a ### Networking * Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). -* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). +* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. 
Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider](set-up-cloud-providers.md) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cis-scans/cis-scans.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cis-scans/cis-scans.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md index b9c354f493c..87f4a0bde15 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cis-scans/cis-scans.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md @@ -20,7 +20,7 @@ _Available as of v2.4.0_ # Prerequisites -To run security scans on a cluster and access the generated reports, you must be an [Administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [Cluster Owner.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) +To run security scans on a cluster and access the generated reports, you must be an [Administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [Cluster Owner.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) Rancher can only run security scans on clusters that were created with RKE, which includes custom clusters and clusters
that Rancher created in an infrastructure provider such as Amazon EC2 or GCE. Imported clusters and clusters in hosted Kubernetes providers can't be scanned by Rancher. @@ -109,7 +109,7 @@ Rancher provides a set of alerts for cluster scans. which are not configured to - A scheduled cluster scan was completed - A scheduled cluster scan has failures -> **Prerequisite:** You need to configure a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) before configuring, sending, or receiving alerts. +> **Prerequisite:** You need to configure a [notifier](../explanations/integrations-in-rancher/notifiers.md) before configuring, sending, or receiving alerts. To activate an existing alert for a CIS scan result, @@ -131,11 +131,11 @@ To create a new alert, 1. Enter a name for the alert. 1. In the **Is** field, set the alert to be triggered when a scan is completed or when a scan has a failure. 1. In the **Send a** field, set the alert as a **Critical,** **Warning,** or **Info** alert level. -1. Choose a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) for the alert. +1. Choose a [notifier](../explanations/integrations-in-rancher/notifiers.md) for the alert. **Result:** The alert is created and activated. The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. -For more information about alerts, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) +For more information about alerts, refer to [this page.](cluster-alerts.md) # Deleting a Report @@ -153,4 +153,4 @@ For more information about alerts, refer to [this page.]({{}}/rancher/v # List of Skipped and Not Applicable Tests -For a list of skipped and not applicable tests, refer to this page. \ No newline at end of file +For a list of skipped and not applicable tests, refer to this page.
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cli/cli.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md similarity index 77% rename from versioned_docs/version-2.0-2.4/cli/cli.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md index fe6e865db4e..bcb654ba14b 100644 --- a/versioned_docs/version-2.0-2.4/cli/cli.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md @@ -20,7 +20,7 @@ The binary can be downloaded directly from the UI. The link can be found in the After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - Your Rancher Server URL, which is used to connect to Rancher Server. -- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). +- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../reference-guides/user-settings/api-keys.md). ### CLI Authentication @@ -34,7 +34,7 @@ If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to con ### Project Selection -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. +Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. 
**Example: `./rancher context switch` Output** ``` @@ -61,16 +61,16 @@ The following commands are available for use in Rancher CLI. | Command | Result | |---|---| | `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or Rancher charts. | -| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/). | -| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | +| `catalog` | Performs operations on [catalogs](catalog/). | +| `clusters, [cluster]` | Performs operations on your [clusters](kubernetes-clusters-in-rancher-setup.md). | +| `context` | Switches between Rancher [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). For an example, see [Project Selection](#project-selection). | +| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects](k8s-in-rancher/projects-and-namespaces/) and [workloads](workloads-and-pods.md)). Specify resources by name or ID. | | `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | | `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). 
| | `namespaces, [namespace]` |Performs operations on namespaces. | | `nodes, [node]` |Performs operations on nodes. | -| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/). | -| `ps` | Displays [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads) in a project. | +| `projects, [project]` | Performs operations on [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). | +| `ps` | Displays [workloads](workloads-and-pods.md) in a project. | | `settings, [setting]` | Shows the current settings for your Rancher Server. | | `ssh` | Connects to one of your cluster nodes using the SSH protocol. | | `help, [h]` | Shows a list of commands or help for one command. | diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-alerts/cluster-alerts.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md similarity index 91% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-alerts/cluster-alerts.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md index 67430e68c2f..a63ac431c73 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-alerts/cluster-alerts.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md @@ -30,13 +30,13 @@ This section covers the following topics: # About Alerts -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. +Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 
Leveraging these tools, Rancher can notify [cluster owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) and [project owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) of events they need to address. Before you can receive alerts, you must configure one or more notifier in Rancher. -When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) for them. +When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier](../explanations/integrations-in-rancher/notifiers.md) for them. -For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/default-alerts) +For details about what triggers the predefined alerts, refer to the [documentation on default alerts.](cluster-admin/tools/alerts/default-alerts) ### Alert Event Examples @@ -49,9 +49,9 @@ Some examples of alert events are: ### Alerts Triggered by Prometheus Queries -When you edit an alert rule, you will have the opportunity to configure the alert to be triggered based on a Prometheus expression. For examples of expressions, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/) +When you edit an alert rule, you will have the opportunity to configure the alert to be triggered based on a Prometheus expression. 
For examples of expressions, refer to [this page.](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/) -Monitoring must be [enabled]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/) before you can trigger alerts with custom Prometheus queries or expressions. +Monitoring must be [enabled](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/) before you can trigger alerts with custom Prometheus queries or expressions. ### Urgency Levels @@ -59,7 +59,7 @@ You can set an urgency level for each alert. This urgency appears in the notific ### Scope of Alerts -The scope for alerts can be set at either the cluster level or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/alerts/). +The scope for alerts can be set at either the cluster level or [project level](project-admin/tools/alerts/). At the cluster level, Rancher monitors components in your Kubernetes cluster, and sends you alerts related to: @@ -80,15 +80,15 @@ After you set up cluster alerts, you can manage each alert object. To manage ale # Adding Cluster Alerts -As a [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. +As a [cluster owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to send you alerts for cluster events. ->**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers/). +>**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier](monitoring-alerting/legacy/notifiers/). 1. From the **Global** view, navigate to the cluster that you want to configure cluster alerts for. Select **Tools > Alerts**. Then click **Add Alert Group**. 1. 
Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. 1. Based on the type of alert you want to create, refer to the [cluster alert configuration section.](#cluster-alert-configuration) 1. Continue adding more **Alert Rule** to the group. -1. Finally, choose the [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) to send the alerts to. +1. Finally, choose the [notifiers](../explanations/integrations-in-rancher/notifiers.md) to send the alerts to. - You can set up multiple notifiers. - You can change notifier recipients on the fly. diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/editing-clusters/editing-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md similarity index 63% rename from versioned_docs/version-2.0-2.4/cluster-admin/editing-clusters/editing-clusters.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md index 288e92480e1..2ad784ff2f6 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/editing-clusters/editing-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md @@ -5,7 +5,7 @@ weight: 2025 After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. 
-For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members) +For information on editing cluster membership, go to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) - [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) - [Editing Clusters in the Rancher UI](#editing-clusters-in-the-rancher-ui) @@ -14,11 +14,11 @@ For information on editing cluster membership, go to [this page.]({{}}/ ### Cluster Management Capabilities by Cluster Type -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. The following table summarizes the options and settings available for each cluster type: -import ClusterCapabilitiesTable from '/rancher/v2.0-v2.4/en/shared-files/_cluster-capabilities-table.md'; +import ClusterCapabilitiesTable from 'shared-files/_cluster-capabilities-table.md'; @@ -26,22 +26,22 @@ import ClusterCapabilitiesTable from '/rancher/v2.0-v2.4/en/shared-files/_cluste To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. -In [clusters launched by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. +In [clusters launched by RKE](launch-kubernetes-with-rancher.md), you can edit any of the remaining options that follow. 
Note that these options are not available for imported clusters or hosted Kubernetes clusters. Option | Description | ---------|----------| - Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes). | + Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes](../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). | Network Provider | The container networking interface (CNI) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | - Pod Security Policy Support | Enables [pod security policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{}}/rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | + Pod Security Policy Support | Enables [pod security policies](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | + Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version](installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. 
| Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | - Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | + Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option](cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | ### Editing Clusters with YAML @@ -50,13 +50,13 @@ Instead of using the Rancher UI to choose Kubernetes options for the cluster, ad - To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. - To read from an existing RKE file, click **Read from File**. -![image]({{}}/img/rancher/cluster-options-yaml.png) +![image](/img/cluster-options-yaml.png) -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). +For an example of RKE config file syntax, see the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). 
-For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) +For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) -In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the [cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) +In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the [cluster configuration reference.](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) >**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/cluster-logging.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md similarity index 85% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/cluster-logging.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md index 60af88703f2..c558971b995 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-logging/cluster-logging.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md @@ -61,8 +61,8 @@ Logging Driver: json-file You can configure logging at either cluster level or project level. -- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. -- [Project logging]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/logging/) writes logs for every pod in that particular project. +- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters](launch-kubernetes-with-rancher.md), it also writes logs for all the Kubernetes system components. +- [Project logging](project-admin/tools/logging/) writes logs for every pod in that particular project. Logs that are sent to your logging service are from the following locations: @@ -71,7 +71,7 @@ Logs that are sent to your logging service are from the following locations: # Enabling Cluster Logging -As an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. 
+As an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. 1. From the **Global** view, navigate to the cluster that you want to configure cluster logging. @@ -79,11 +79,11 @@ As an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/glo 1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports integration with the following services: - - [Elasticsearch]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/fluentd/) + - [Elasticsearch](cluster-admin/tools/logging/elasticsearch/) + - [Splunk](cluster-admin/tools/logging/splunk/) + - [Kafka](cluster-admin/tools/logging/kafka/) + - [Syslog](cluster-admin/tools/logging/syslog/) + - [Fluentd](cluster-admin/tools/logging/fluentd/) 1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/cluster-monitoring.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md similarity index 83% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/cluster-monitoring.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md index d21612e27f5..0e1a74ea9ca 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/cluster-monitoring/cluster-monitoring.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md @@ -29,7 +29,7 @@ This section covers the following topics: Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): -You can configure these services to collect logs at either the cluster level or the project level. This page describes how to enable monitoring for a cluster. For details on enabling monitoring for a project, refer to the [project administration section]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/). +You can configure these services to collect logs at either the cluster level or the project level. This page describes how to enable monitoring for a cluster. For details on enabling monitoring for a project, refer to the [project administration section](project-admin/tools/monitoring/). >A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. @@ -41,7 +41,7 @@ Multi-tenancy support in terms of cluster-only and project-only Prometheus insta # Monitoring Scope -Using Prometheus, you can monitor Rancher at both the cluster level and [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. 
+Using Prometheus, you can monitor Rancher at both the cluster level and [project level](project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - Cluster monitoring allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. @@ -49,11 +49,11 @@ Using Prometheus, you can monitor Rancher at both the cluster level and [project - etcd database - All nodes (including workers) -- [Project monitoring]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. +- [Project monitoring](project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. # Enabling Cluster Monitoring -As an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. +As an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. > **Prerequisites:** The following TCP ports need to be opened for metrics scraping: > @@ -72,11 +72,11 @@ As an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/glo 1. Select **Tools > Monitoring** in the navigation bar. -1. 
Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter in your desired configuration options. +1. Select **Enable** to show the [Prometheus configuration options](monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter in your desired configuration options. 1. Click **Save**. -**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/) through the Rancher dashboard or directly from Grafana. +**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application](catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics](monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/) through the Rancher dashboard or directly from Grafana. > The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/cluster-yml-templates.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-yml.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/cluster-yml-templates/cluster-yml-templates.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-yml.md diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/microsoft-adfs.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md similarity index 65% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/microsoft-adfs.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md index ea9a810d8c5..5b4b85382ff 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/microsoft-adfs/microsoft-adfs.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -22,10 +22,10 @@ You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/window Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. -- [1. Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) -- [2. Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) +- [1. 
Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) +- [2. Configuring Rancher for Microsoft AD FS](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md) {{< saml_caveats >}} -### [Next: Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) +### [Next: Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/openldap/openldap.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md similarity index 92% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/openldap/openldap.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md index 93d1145f864..f51831b2f63 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/openldap/openldap.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md @@ -19,9 +19,9 @@ Rancher must be configured with a LDAP bind account (aka service account) to sea ## Configure OpenLDAP in Rancher -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](./openldap-config) +Configure the settings for the OpenLDAP server, groups and users. 
For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). 1. Log into the Rancher UI using the initial local `admin` account. 2. From the **Global** view, navigate to **Security** > **Authentication** @@ -49,4 +49,4 @@ Once you have completed the configuration, proceed by testing the connection to ## Annex: Troubleshooting -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. 
diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/shibboleth/shibboleth.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md similarity index 92% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/shibboleth/shibboleth.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md index 511f930b9d1..9a1cd6ac237 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/shibboleth/shibboleth.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md @@ -11,7 +11,7 @@ In this configuration, when Rancher users log in, they will be redirected to the If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. -> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](./about) +> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md) This section covers the following topics: @@ -96,9 +96,9 @@ Rancher must be configured with a LDAP bind account (aka service account) to sea ### Configure OpenLDAP in Rancher -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/openldap-config) Note that nested group membership is not available for Shibboleth. 
+Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) Note that nested group membership is not available for Shibboleth. -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). 1. Log into the Rancher UI using the initial local `admin` account. 2. From the **Global** view, navigate to **Security** > **Authentication** @@ -106,4 +106,4 @@ Configure the settings for the OpenLDAP server, groups and users. For help filli # Troubleshooting -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/volumes-and-storage.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md similarity index 59% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/volumes-and-storage.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md index 2be0a8a58d4..c6bc3b538e3 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/volumes-and-storage.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -9,15 +9,15 @@ aliases: --- When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. -The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](./how-storage-works) +The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) ### Prerequisites -To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. 
+To set up persistent storage, the `Manage Volumes` [role](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) +For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](cluster-provisioning/rke-clusters/options/cloud-providers/) For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. @@ -30,7 +30,7 @@ The overall workflow for setting up existing storage is as follows: 3. Add a persistent volume claim (PVC) that refers to the PV. 4. Mount the PVC as a volume in your workload. -For details and prerequisites, refer to [this page.](./attaching-existing-storage) +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md) ### Dynamically Provisioning New Storage in Rancher @@ -40,7 +40,7 @@ The overall workflow for provisioning new storage is as follows: 2. Add a persistent volume claim (PVC) that refers to the storage class. 3. Mount the PVC as a volume for your workload. 
-For details and prerequisites, refer to [this page.](./provisioning-new-storage) +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md) ### Longhorn Storage @@ -52,18 +52,18 @@ If you have a pool of block storage, Longhorn can help you provide persistent st ### Provisioning Storage Examples -We provide examples of how to provision storage with [NFS,](./examples/nfs) [vSphere,](./examples/vsphere) and [Amazon's EBS.](./examples/ebs) +We provide examples of how to provision storage with [NFS,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) [vSphere,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) and [Amazon's EBS.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) ### GlusterFS Volumes -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](./glusterfs-volumes) +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md) ### iSCSI Volumes -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. 
For details on resolving this issue, refer to [this page.](./iscsi-volumes) +In [Rancher Launched Kubernetes clusters](launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md) ### hostPath Volumes -Before you create a hostPath volume, you need to set up an [extra_bind]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. +Before you create a hostPath volume, you need to set up an [extra_bind](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. ### Related Links diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md new file mode 100644 index 00000000000..30fb906c5f1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md @@ -0,0 +1,16 @@ +--- +title: VSphere Node Template Configuration +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference + - /rancher/v2.0-v2.4/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids +--- + +The vSphere node templates in Rancher were updated in the following Rancher versions. 
Refer to the newest configuration reference that is less than or equal to your Rancher version: + +- [v2.3.3](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md) +- [v2.3.0](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md) +- [v2.2.0](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md) +- [v2.0.4](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md) + +For Rancher versions before v2.0.4, refer to [this version.](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md new file mode 100644 index 00000000000..cc35e1a37fe --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md @@ -0,0 +1,16 @@ +--- +title: Deploying Rancher Server +weight: 100 +--- + +Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. + +- [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md) (uses Terraform) +- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) +- [Azure](../getting-started/quick-start-guides/deploy-rancher-manager/azure.md) (uses Terraform) +- [GCP](../getting-started/quick-start-guides/deploy-rancher-manager/gcp.md) (uses Terraform) +- [Vagrant](../getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md) + +If you prefer, the following guide will take you through the same process in individual steps. 
Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. + +- [Manual Install](../getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md new file mode 100644 index 00000000000..cf89f3341a1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md @@ -0,0 +1,9 @@ +--- +title: Deploying Workloads +weight: 200 +--- + +These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. + +- [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) +- [Workload with NodePort](../getting-started/quick-start-guides/deploy-workloads/nodeports.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/feature-flags/feature-flags.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md similarity index 91% rename from versioned_docs/version-2.0-2.4/installation/resources/feature-flags/feature-flags.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md index 1f4839d8e1c..b843f8027c9 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/feature-flags/feature-flags.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md @@ -9,7 +9,7 @@ aliases: import Tabs 
from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. +Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. The features can be enabled in three ways: @@ -33,9 +33,9 @@ For example, if you install Rancher, then set a feature flag to true with the Ra The following is a list of the feature flags available in Rancher: - `dashboard`: This feature enables the new experimental UI that has a new look and feel. The dashboard also leverages a new API in Rancher which allows the UI to access the default Kubernetes resources without any intervention from Rancher. -- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. +- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules](installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. 
- `proxy`: This feature enables Rancher to use a new simplified code base for the proxy, which can help enhance performance and security. The proxy feature is known to have issues with Helm deployments, which prevents any catalog applications to be deployed which includes Rancher's tools like monitoring, logging, Istio, etc. -- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. +- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.](installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. The below table shows the availability and default value for feature flags in Rancher: @@ -71,7 +71,7 @@ Note: If you are installing an alpha version, Helm requires adding the `--devel` ### Rendering the Helm Chart for Air Gap Installations -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher) +For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. 
diff --git a/versioned_docs/version-2.0-2.4/helm-charts/helm-charts.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md similarity index 83% rename from versioned_docs/version-2.0-2.4/helm-charts/helm-charts.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md index 989a22e38ca..0788be49086 100644 --- a/versioned_docs/version-2.0-2.4/helm-charts/helm-charts.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md @@ -43,7 +43,7 @@ Project | This specific cluster can access the Helm charts in this catalog | v2 _Applicable as of v2.4.0_ -In November 2019, Helm 3 was released, and some features were deprecated or refactored. It is not fully [backwards compatible]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#helm-3-backwards-compatibility) with Helm 2. Therefore, catalogs in Rancher need to be separated, with each catalog only using one Helm version. This will help reduce app deployment issues as your Rancher users will not need to know which version of your chart is compatible with which Helm version - they can just select a catalog, select an app and deploy a version that has already been vetted for compatibility. +In November 2019, Helm 3 was released, and some features were deprecated or refactored. It is not fully [backwards compatible](helm-charts/legacy-catalogs/#helm-3-backwards-compatibility) with Helm 2. Therefore, catalogs in Rancher need to be separated, with each catalog only using one Helm version. This will help reduce app deployment issues as your Rancher users will not need to know which version of your chart is compatible with which Helm version - they can just select a catalog, select an app and deploy a version that has already been vetted for compatibility. When you create a custom catalog, you will have to configure the catalog to use either Helm 2 or Helm 3. This version cannot be changed later. 
If the catalog is added with the wrong Helm version, it will need to be deleted and re-added. @@ -74,23 +74,23 @@ apiVersion `v2` is now reserved for Helm 3 charts. This apiVersion enforcement c # Built-in Global Catalogs -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. For details, refer to the section on managing [built-in global catalogs.]({{}}/rancher/v2.0-v2.4/en/catalog/built-in) +Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. For details, refer to the section on managing [built-in global catalogs.](catalog/built-in) # Custom Catalogs -There are two types of catalogs in Rancher: [Built-in global catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/built-in/) and [custom catalogs.]({{}}/rancher/v2.0-v2.4/en/catalog/adding-catalogs/) +There are two types of catalogs in Rancher: [Built-in global catalogs](catalog/built-in/) and [custom catalogs.](catalog/adding-catalogs/) -Any user can create custom catalogs to add into Rancher. Custom catalogs can be added into Rancher at the global level, cluster level, or project level. For details, refer to the [section on adding custom catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/adding-catalogs) and the [catalog configuration reference.]({{}}/rancher/v2.0-v2.4/en/catalog/catalog-config) +Any user can create custom catalogs to add into Rancher. Custom catalogs can be added into Rancher at the global level, cluster level, or project level. For details, refer to the [section on adding custom catalogs](catalog/adding-catalogs) and the [catalog configuration reference.](catalog/catalog-config) # Creating and Launching Applications In Rancher, applications are deployed from the templates in a catalog. 
This section covers the following topics: -* [Multi-cluster applications]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) -* [Creating catalog apps]({{}}/rancher/v2.0-v2.4/en/catalog/creating-apps) -* [Launching catalog apps within a project]({{}}/rancher/v2.0-v2.4/en/catalog/launching-apps) -* [Managing catalog apps]({{}}/rancher/v2.0-v2.4/en/catalog/managing-apps) -* [Tutorial: Example custom chart creation]({{}}/rancher/v2.0-v2.4/en/catalog/tutorial) +* [Multi-cluster applications](catalog/multi-cluster-apps/) +* [Creating catalog apps](catalog/creating-apps) +* [Launching catalog apps within a project](catalog/launching-apps) +* [Managing catalog apps](catalog/managing-apps) +* [Tutorial: Example custom chart creation](catalog/tutorial) # Chart Compatibility with Rancher @@ -102,4 +102,4 @@ _Available as v2.2.0_ When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. -For more information on how to use this feature, see [Global DNS]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). +For more information on how to use this feature, see [Global DNS](../how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md). 
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/helm-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-rancher.md similarity index 82% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/helm-rancher.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-rancher.md index dc96db26d41..04bf72fd0eb 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-rancher/helm-rancher.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-rancher.md @@ -8,15 +8,15 @@ aliases: Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher. -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/install-rancher/). +For systems without direct internet access, see [Air Gap: Kubernetes install](installation/air-gap-installation/install-rancher/). -Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. > **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) ### Add the Helm Chart Repository -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version). 
+Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md). {{< release-channel >}} @@ -30,7 +30,7 @@ Rancher Server is designed to be secure by default and requires SSL/TLS configur There are three recommended options for the source of the certificate. -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). +> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). | Configuration | Chart option | Description | Requires cert-manager | |-----|-----|-----|-----| @@ -40,12 +40,12 @@ There are three recommended options for the source of the certificate. ### Optional: Install cert-manager -**Note:** cert-manager is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). 
You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer](installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). > **Important:** > Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). Rancher relies on [cert-manager](https://github.com/jetstack/cert-manager) to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. @@ -162,7 +162,7 @@ deployment "rancher" successfully rolled out Create Kubernetes secrets from your own certificates for Rancher to use. -> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) +> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. 
Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?](../faq/technical-items.md#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - Set `hostname` and set `ingress.tls.source` to `secret`. - If you are installing an alpha version, Helm requires adding the `--devel` option to the command. @@ -186,7 +186,7 @@ helm install rancher-/rancher \ --set privateCA=true ``` -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. +Now that Rancher is deployed, see [Adding TLS Secrets](installation/options/helm2/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. After adding the secrets, check if Rancher was rolled out successfully: @@ -210,11 +210,11 @@ It should show the same count for `DESIRED` and `AVAILABLE`. The Rancher chart configuration has many options for customizing the install to suit your specific environment. Here are some common advanced scenarios. 
-* [HTTP Proxy]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/) -* [Private Docker Image Registry]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -* [TLS Termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination) +* [HTTP Proxy](../reference-guides/installation-references/helm-chart-options.md) +* [Private Docker Image Registry](../reference-guides/installation-references/helm-chart-options.md#private-registry-and-air-gap-installs) +* [TLS Termination on an External Load Balancer](installation/options/helm2/helm-rancher/chart-options/#external-tls-termination) -See the [Chart Options]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/) for the full list of options. +See the [Chart Options](installation/options/helm2/helm-rancher/chart-options/) for the full list of options. ### Save your options @@ -224,4 +224,4 @@ Make sure you save the `--set` options you used. You will need to use the same o That's it you should have a functional Rancher server. Point a browser at the hostname you picked and you should be greeted by the colorful login page. -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/troubleshooting/) Page +Doesn't work? 
Take a look at the [Troubleshooting](installation/options/helm2/helm-rancher/troubleshooting/) Page diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/create-nodes-lb.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-create-nodes-lb.md similarity index 74% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/create-nodes-lb.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-create-nodes-lb.md index 87cbb05f594..02b9078dd0f 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/create-nodes-lb/create-nodes-lb.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-create-nodes-lb.md @@ -12,9 +12,9 @@ Use your provider of choice to provision 3 nodes and a Load Balancer endpoint fo ### Node Requirements -View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements](installation-requirements.md). -View the OS requirements for RKE at [RKE Requirements]({{}}/rke/latest/en/os/) +View the OS requirements for RKE at [RKE Requirements](https://rancher.com/docs/rke/latest/en/os/) ### Load Balancer @@ -27,7 +27,7 @@ Configure a load balancer as a basic Layer 4 TCP forwarder. 
The exact configurat #### Examples -* [Nginx]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nginx/) -* [Amazon NLB]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nlb/) +* [Nginx](installation/options/helm2/create-nodes-lb/nginx/) +* [Amazon NLB](installation/options/helm2/create-nodes-lb/nlb/) -### [Next: Install Kubernetes with RKE]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/) +### [Next: Install Kubernetes with RKE](installation/options/helm2/kubernetes-rke/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-init/helm-init.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-helm-init.md similarity index 87% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-init/helm-init.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-helm-init.md index f3186308967..83c576d5b4f 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm-init/helm-init.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-helm-init.md @@ -9,9 +9,9 @@ aliases: Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. -For systems without direct internet access, see [Helm - Air Gap]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) for install details. +For systems without direct internet access, see [Helm - Air Gap](air-gapped-helm-cli-install.md) for install details. 
-Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. > **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) @@ -64,6 +64,6 @@ Server: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b ### Issues or errors? -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/troubleshooting/) page. +See the [Troubleshooting](installation/options/helm2/helm-init/troubleshooting/) page. -### [Next: Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/) +### [Next: Install Rancher](installation/options/helm2/helm-rancher/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/kubernetes-rke/kubernetes-rke.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-kubernetes-rke.md similarity index 84% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/kubernetes-rke/kubernetes-rke.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-kubernetes-rke.md index aeb3d54a9dd..fc3f72ddabf 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/kubernetes-rke/kubernetes-rke.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-kubernetes-rke.md @@ -8,7 +8,7 @@ aliases: Use RKE to install Kubernetes with a high availability etcd configuration. ->**Note:** For systems without direct internet access see [Air Gap: Kubernetes install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) for install details. 
+>**Note:** For systems without direct internet access see [Air Gap: Kubernetes install](installation/air-gap-high-availability/) for install details. ### Create the `rancher-cluster.yml` File @@ -53,9 +53,9 @@ services: RKE has many configuration options for customizing the install to suit your specific environment. -Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. +Please see the [RKE Documentation](https://rancher.com/docs/rke/latest/en/config-options/) for the full list of options and capabilities. -For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide]({{}}/rancher/v2.0-v2.4/en/installation/options/etcd/). +For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide](installation/options/etcd/). ### Run RKE @@ -123,13 +123,13 @@ kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed Save a copy of the following files in a secure location: - `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. ### Issues or errors? -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/troubleshooting/) page. +See the [Troubleshooting](installation/options/helm2/kubernetes-rke/troubleshooting/) page. -### [Next: Initialize Helm (Install tiller)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) +### [Next: Initialize Helm (Install tiller)](installation/options/helm2/helm-init/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/layer-4-lb.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md similarity index 94% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/layer-4-lb.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md index 8635d687969..d345c76b86a 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/layer-4-lb.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md @@ -8,9 +8,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). 
> ->If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: @@ -20,7 +20,7 @@ This procedure walks you through setting up a 3-node cluster using the Rancher K In a Kubernetes setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers -![High-availability Kubernetes installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) +![High-availability Kubernetes installation of Rancher](/img/ha/rancher2ha.svg) ## Installation Outline @@ -47,11 +47,11 @@ Installation of Rancher in a high-availability configuration involves multiple p ## 1. Provision Linux Hosts -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +Provision three Linux hosts according to our [Requirements](installation-requirements.md). ## 2. Configure Load Balancer -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. 
If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](./nlb) +We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md) >**Note:** > In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. @@ -151,7 +151,7 @@ Choose a fully qualified domain name (FQDN) that you want to use to access Ranch RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. 2. Confirm that RKE is now executable by running the following command: @@ -170,8 +170,8 @@ RKE uses a `.yml` config file to install and configure your Kubernetes cluster. >**Advanced Config Options:** > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). + >- Want records of all transactions with the Rancher API? Enable the [API Auditing](installation/api-auditing) feature by editing your RKE config file. 
For more information, see how to enable it in [your RKE config file](installation/options/helm2/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 2. Rename the file to `rancher-cluster.yml`. @@ -187,7 +187,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. + > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. nodes: # The IP address or hostname of the node @@ -393,8 +393,8 @@ During installation, RKE automatically generates a config file named `kube_confi You have a couple of options: -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). +- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore](installation/backups-and-restoration/ha-backup-and-restoration). 
+- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](kubernetes-clusters-in-rancher-setup.md).
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/layer-7-lb.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md similarity index 90% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/layer-7-lb.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md index 964be334f5c..a410512521f 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/layer-7-lb.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md @@ -9,9 +9,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. 
The setup is based on: @@ -21,7 +21,7 @@ This procedure walks you through setting up a 3-node cluster using the Rancher K In a Kubernetes setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect them to cluster nodes using logic that optimally distributes load. Kubernetes Rancher install with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) +![Rancher HA](/img/ha/rancher2ha-l7.svg) ## Installation Outline @@ -46,7 +46,7 @@ Installation of Rancher in a high-availability configuration involves multiple p ## 1. Provision Linux Hosts -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). +Provision three Linux hosts according to our [Requirements](installation-requirements.md). ## 2. Configure Load Balancer @@ -100,7 +100,7 @@ Choose a fully qualified domain name (FQDN) that you want to use to access Ranch RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. 2. Confirm that RKE is now executable by running the following command: @@ -119,8 +119,8 @@ RKE uses a YAML config file to install and configure your Kubernetes cluster. Th >**Advanced Config Options:** > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) feature by editing your RKE config file.
For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). + >- Want records of all transactions with the Rancher API? Enable the [API Auditing](installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file](installation/options/helm2/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 2. Rename the file to `rancher-cluster.yml`. @@ -137,7 +137,7 @@ Once you have the `rancher-cluster.yml` config file template, edit the nodes sec >**Note:** > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. + >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. nodes: # The IP address or hostname of the node @@ -177,7 +177,7 @@ Choose from the following options: > >- The certificate files must be in PEM format. >- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. 
For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) @@ -284,8 +284,8 @@ During installation, RKE automatically generates a config file named `kube_confi ## What's Next? -- **Recommended:** Review [Creating Backups—High Availability Back Up and Restore]({{}}/rancher/v2.0-v2.4/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/). +- **Recommended:** Review [Creating Backups—High Availability Back Up and Restore](backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. +- Create a Kubernetes cluster: [Creating a Cluster](tasks/clusters/creating-a-cluster/).
diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-troubleshooting.md similarity index 62% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-troubleshooting.md index aa383d05918..d6eb2e41a42 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/rke-add-on/troubleshooting/troubleshooting.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-troubleshooting.md @@ -10,9 +10,9 @@ aliases: > #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** > ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. This section contains common errors seen when setting up a Kubernetes installation. @@ -22,14 +22,14 @@ Choose from the following options: In this section, you can find generic ways to debug your Kubernetes cluster. 
-- [Failed to set up SSH tunneling for host]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) +- [Failed to set up SSH tunneling for host](https://rancher.com/docs/rke/latest/en/troubleshooting/ssh-connectivity-errors/) In this section, you can find errors related to SSH tunneling when you run the `rke` command to setup your nodes. -- [Failed to get job complete status](./job-complete-status/) +- [Failed to get job complete status](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md) In this section, you can find errors related to deploying addons. -- [404 - default backend]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/) +- [404 - default backend](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md) In this section, you can find errors related to the `404 - default backend` page that is shown when trying to access Rancher. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md new file mode 100644 index 00000000000..430999ac08c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md @@ -0,0 +1,19 @@ +--- +title: RKE Add-On Install +weight: 276 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). 
+> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + + +* [Kubernetes installation with External Load Balancer (TCP/Layer 4)](installation/options/helm2/rke-add-on/layer-4-lb) +* [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)](installation/options/helm2/rke-add-on/layer-7-lb) +* [HTTP Proxy Configuration for a Kubernetes installation](installation/options/helm2/rke-add-on/proxy/) +* [Troubleshooting RKE Add-on Installs](installation/options/helm2/rke-add-on/troubleshooting/) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm2.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2.md similarity index 66% rename from versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm2.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2.md index 552053f1816..ef3559cdda8 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/advanced/helm2/helm2.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2.md @@ -18,7 +18,7 @@ This procedure walks you through setting up a 3-node cluster with Rancher Kubern > **Important:** The Rancher management server can only be run on an RKE-managed Kubernetes cluster. Use of Rancher on hosted Kubernetes or other providers is not supported. -> **Important:** For the best performance, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. +> **Important:** For the best performance, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. 
After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. ## Recommended Architecture @@ -28,7 +28,7 @@ This procedure walks you through setting up a 3-node cluster with Rancher Kubern - The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment.
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![High-availability Kubernetes Install]({{}}/img/rancher/ha/rancher2ha.svg) +![High-availability Kubernetes Install](/img/ha/rancher2ha.svg) Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers ## Required Tools @@ -36,26 +36,26 @@ This procedure walks you through setting up a 3-node cluster with Rancher Kubern The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +- [rke](https://rancher.com/docs/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. 
## Installation Outline -- [Create Nodes and Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/) -- [Install Kubernetes with RKE]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/) -- [Initialize Helm (tiller)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) -- [Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/) +- [Create Nodes and Load Balancer](installation/options/helm2/create-nodes-lb/) +- [Install Kubernetes with RKE](installation/options/helm2/kubernetes-rke/) +- [Initialize Helm (tiller)](installation/options/helm2/helm-init/) +- [Install Rancher](installation/options/helm2/helm-rancher/) ## Additional Install Options -- [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) +- [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) ## Previous Methods -[RKE add-on install]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/) +[RKE add-on install](installation/options/helm2/rke-add-on/) > **Important: RKE add-on install is only supported up to Rancher v2.0.8** > -> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). +> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). > -> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. 
+> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/horitzontal-pod-autoscaler.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md similarity index 70% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/horitzontal-pod-autoscaler.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md index f3e5fdd4bfe..64195f8f1fb 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/horitzontal-pod-autoscaler/horitzontal-pod-autoscaler.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -21,17 +21,17 @@ The way that you manage HPAs is different based on your version of the Kubernete HPAs are also managed differently based on your version of Rancher: -- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). -- **For Rancher Before v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. 
For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). +- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher Before v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md). You might have additional HPA installation steps if you are using an older version of Rancher: - **For Rancher v2.0.7+:** Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. -- **For Rancher Before v2.0.7:** Clusters created in Rancher before v2.0.7 don't automatically have the requirements needed to use HPA. For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). +- **For Rancher Before v2.0.7:** Clusters created in Rancher before v2.0.7 don't automatically have the requirements needed to use HPA. 
For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md). ## Testing HPAs with a Service Deployment -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). +In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md). You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] -({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). +(../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md new file mode 100644 index 00000000000..081207021e8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md @@ -0,0 +1,10 @@ +--- +title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials.
+shortTitle: Infrastructure Tutorials +weight: 5 +--- + +To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) + + +To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-autoscaler/cluster-autoscaler.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md similarity index 91% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-autoscaler/cluster-autoscaler.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md index 5de09b65baf..40dafa4b01a 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-autoscaler/cluster-autoscaler.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md @@ -22,4 +22,4 @@ Cluster Autoscaler provides support to distinct cloud providers. 
For more inform ### Setting up Cluster Autoscaler on Amazon Cloud Provider -For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/amazon) +For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md) diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/install-rancher-on-k8s.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md similarity index 81% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/install-rancher-on-k8s.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md index 074025d6377..f6e7859ec2d 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/install-rancher-on-k8s.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -22,9 +22,9 @@ The cluster requirements depend on the Rancher version: - **In Rancher v2.4.x,** Rancher needs to be installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. - **In Rancher before v2.4,** Rancher needs to be installed on an RKE Kubernetes cluster. 
-For the tutorial to install an RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha) +For the tutorial to install an RKE Kubernetes cluster, refer to [this page.](installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) -For the tutorial to install a K3s Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-with-external-db) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db) +For the tutorial to install a K3s Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) # Install the Rancher Helm Chart @@ -32,13 +32,13 @@ Rancher is installed using the Helm package manager for Kubernetes. Helm charts With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/install-rancher/). +For systems without direct internet access, see [Air Gap: Kubernetes install](installation/air-gap-installation/install-rancher/). 
-To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags) +To choose a Rancher version to install, refer to [Choosing a Rancher Version.](installation/options/server-tags) -To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) +To choose a version of Helm to install Rancher with, refer to the [Helm version requirements](installation/options/helm-version) -> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. +> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section](installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. To set up Rancher, @@ -58,11 +58,11 @@ The following CLI tools are required for setting up the Kubernetes cluster. Plea Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. 
Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. ### 2. Add the Helm Chart Repository -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). {{< release-channel >}} @@ -82,7 +82,7 @@ kubectl create namespace cattle-system The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). +> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: @@ -99,14 +99,14 @@ There are three recommended options for the source of the certificate used for T ### 5.
Install cert-manager -> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). +> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`).
Click to Expand -> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). +> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). @@ -219,7 +219,7 @@ When you run this command, the `hostname` option must match the `Common Name` or Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. -> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) +> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?](../faq/technical-items.md#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - Set the `hostname`. - Set `ingress.tls.source` to `secret`. @@ -242,18 +242,18 @@ helm install rancher rancher-/rancher \ --set privateCA=true ``` -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. 
+Now that Rancher is deployed, see [Adding TLS Secrets](installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. -- [HTTP Proxy]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) -- [Private Docker Image Registry]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) +- [HTTP Proxy](../reference-guides/installation-references/helm-chart-options.md#http-proxy) +- [Private Docker Image Registry](../reference-guides/installation-references/helm-chart-options.md#private-registry-and-air-gap-installs) +- [TLS Termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) -See the [Chart Options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) for the full list of options. +See the [Chart Options](installation/resources/chart-options/) for the full list of options. ### 7. Verify that the Rancher Server is Successfully Deployed @@ -286,7 +286,7 @@ That's it. You should have a functional Rancher server. In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) Page +Doesn't work? 
Take a look at the [Troubleshooting](installation/options/troubleshooting/) Page ### Optional Next Steps diff --git a/versioned_docs/version-2.0-2.4/installation/installation.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md similarity index 53% rename from versioned_docs/version-2.0-2.4/installation/installation.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md index 44c5f923fe5..a37fd0b144c 100644 --- a/versioned_docs/version-2.0-2.4/installation/installation.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md @@ -42,9 +42,9 @@ There are also separate instructions for installing Rancher in an air gap enviro | Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | | ---------------------------------- | ------------------------------ | ---------- | -| With direct access to the Internet | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) | -| Behind an HTTP proxy | These [docs,]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) plus this [configuration]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) | These [docs,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) plus this [configuration]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/) | -| In an air gap environment | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) | +| With direct access to the Internet | [Docs](install-upgrade-on-a-kubernetes-cluster.md) | [Docs](rancher-on-a-single-node-with-docker.md) | +| Behind an HTTP proxy | These [docs,](install-upgrade-on-a-kubernetes-cluster.md) plus 
this [configuration](../reference-guides/installation-references/helm-chart-options.md#http-proxy) | These [docs,](rancher-on-a-single-node-with-docker.md) plus this [configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) | +| In an air gap environment | [Docs](air-gapped-helm-cli-install.md) | [Docs](air-gapped-helm-cli-install.md) | We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. @@ -57,38 +57,38 @@ For that reason, we recommend that for a production-grade architecture, you shou For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. -Our [instructions for installing Rancher on Kubernetes]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. +Our [instructions for installing Rancher on Kubernetes](install-upgrade-on-a-kubernetes-cluster.md) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. -When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,]({{}}/rancher/v2.0-v2.4/en/installation/requirements) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. 
+When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,](installation-requirements.md) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. -For a longer discussion of Rancher architecture, refer to the [architecture overview,]({{}}/rancher/v2.0-v2.4/en/overview/architecture) [recommendations for production-grade architecture,]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations) or our [best practices guide.]({{}}/rancher/v2.0-v2.4/en/best-practices/deployment-types) +For a longer discussion of Rancher architecture, refer to the [architecture overview,](rancher-manager-architecture.md) [recommendations for production-grade architecture,](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) or our [best practices guide.](../reference-guides/best-practices/deployment-types.md) # Prerequisites -Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.](installation-requirements.md) # Architecture Tip -For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. +For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. 
After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. -For more architecture recommendations, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations) +For more architecture recommendations, refer to [this page.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) ### More Options for Installations on a Kubernetes Cluster -Refer to the [Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: +Refer to the [Helm chart options](installation/resources/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: -- With [API auditing to record all transactions]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) -- With [TLS termination on a load balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) -- With a [custom Ingress]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#customizing-your-ingress) +- With [API auditing to record all transactions](../reference-guides/installation-references/helm-chart-options.md#api-audit-log) +- With [TLS termination on a load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) +- With a [custom Ingress](../reference-guides/installation-references/helm-chart-options.md#customizing-your-ingress) In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. 
For the full list of their capabilities, refer to their documentation: -- [RKE configuration options]({{}}/rke/latest/en/config-options/) -- [K3s configuration options]({{}}/k3s/latest/en/installation/install-options/) +- [RKE configuration options](https://rancher.com/docs/rke/latest/en/config-options/) +- [K3s configuration options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) ### More Options for Installations with Docker -Refer to the [docs about options for Docker installs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) for details about other configurations including: +Refer to the [docs about options for Docker installs](rancher-on-a-single-node-with-docker.md) for details about other configurations including: -- With [API auditing to record all transactions]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) -- With an [external load balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/single-node-install-external-lb/) -- With a [persistent data store]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#persistent-data) +- With [API auditing to record all transactions](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) +- With an [external load balancer](installation/options/single-node-install-external-lb/) +- With a [persistent data store](../reference-guides/single-node-rancher-in-docker/advanced-options.md#persistent-data) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/versioned_docs/version-2.0-2.4/installation/requirements/requirements.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md similarity index 84% rename from versioned_docs/version-2.0-2.4/installation/requirements/requirements.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md index d8eeb45d2ba..c1b1c248fef 100644 --- a/versioned_docs/version-2.0-2.4/installation/requirements/requirements.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md @@ -9,7 +9,7 @@ import TabItem from '@theme/TabItem'; This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. -> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/) which will run your apps and services. +> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) which will run your apps and services. 
Make sure the node(s) for the Rancher server fulfill the following requirements: @@ -22,7 +22,7 @@ Make sure the node(s) for the Rancher server fulfill the following requirements: - [Node IP Addresses](#node-ip-addresses) - [Port Requirements](#port-requirements) -For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.0-v2.4/en/best-practices/deployment-types/) +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.](../reference-guides/best-practices/deployment-types.md) The Rancher UI works best in Firefox or Chrome. @@ -38,7 +38,7 @@ The `ntp` (Network Time Protocol) package should be installed. This prevents err Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. -If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.0-v2.4/en/installation/options/arm64-platform/) +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).](installation/options/arm64-platform/) ### RKE Specific Requirements @@ -50,14 +50,14 @@ For the container runtime, K3s should work with any modern version of Docker or Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. -If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. 
+If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. -If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. +If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. ### Installing Docker -Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.0-v2.4/en/installation/requirements/installing-docker) to install Docker with one command. +Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts](../getting-started/installation-and-upgrade/installation-requirements/install-docker.md) to install Docker with one command. # Hardware Requirements This section describes the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. @@ -69,7 +69,7 @@ Hardware requirements scale based on the size of your Rancher deployment. Provis -These requirements apply to each host in an [RKE Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) +These requirements apply to each host in an [RKE Kubernetes cluster where the Rancher server is installed.](install-upgrade-on-a-kubernetes-cluster.md) Performance increased in Rancher v2.4.0. 
For the requirements of Rancher before v2.4.0, refer to [this section.](#cpu-and-memory-for-rancher-before-v2-4-0) @@ -87,7 +87,7 @@ Every use case and environment is different. Please [contact Rancher](https://ra -These requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) +These requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.](install-upgrade-on-a-kubernetes-cluster.md) | Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | | --------------- | ---------- | ------------ | -------| ---------| ------------------------- | @@ -103,7 +103,7 @@ Every use case and environment is different. Please [contact Rancher](https://ra -These requirements apply to a host with a [single-node]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) installation of Rancher. +These requirements apply to a host with a [single-node](rancher-on-a-single-node-with-docker.md) installation of Rancher. | Deployment Size | Clusters | Nodes | vCPUs | RAM | | --------------- | -------- | --------- | ----- | ---- | @@ -144,4 +144,4 @@ Each node used should have a static IP configured, regardless of whether you are ### Port Requirements -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. \ No newline at end of file +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. 
[Port Requirements](../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md new file mode 100644 index 00000000000..850213fe2b2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md @@ -0,0 +1,26 @@ +--- +title: Setup Guide +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup + - /rancher/v2.0-v2.4/en/istio/legacy/setup + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/ +--- + +This section describes how to enable Istio and start using it in your projects. + +This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. + +If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. 
+ +> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) and [setting up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) + +1. [Enable Istio in the cluster.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md) +1. [Enable Istio in all the namespaces where you want to use it.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md) +1. [Select the nodes where the main Istio components will be deployed.](../how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md) +1. [Add deployments and services that have the Istio sidecar injected.](../how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md) +1. [Set up the Istio gateway. ](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) +1. [Set up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) +1. 
[Generate traffic and see Istio in action.](istio-setup-guide.md#view-traffic) + diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/istio.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/istio.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md index 4cbdfdd96b1..ca8bea220fd 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/istio/istio.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md @@ -40,19 +40,19 @@ Istio needs to be set up by a Rancher administrator or cluster administrator bef # Prerequisites -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources) to run all of the components of Istio. +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory](../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) to run all of the components of Istio. # Setup Guide -Refer to the [setup guide]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup) for instructions on how to set up Istio and use it in a project. +Refer to the [setup guide](istio-setup-guide.md) for instructions on how to set up Istio and use it in a project. # Disabling Istio -To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio) +To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.](../explanations/integrations-in-rancher/istio/disable-istio.md) # Accessing Visualizations -> By default, only cluster owners have access to Jaeger and Kiali. 
For instructions on how to allow project members to access them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/) +> By default, only cluster owners have access to Jaeger and Kiali. For instructions on how to allow project members to access them, see [this section.](../explanations/integrations-in-rancher/istio/rbac-for-istio.md) After Istio is set up in a cluster, Grafana, Prometheus, Jaeger, and Kiali are available in the Rancher UI. @@ -90,4 +90,4 @@ For more information on the Istio sidecar, refer to the [Istio docs](https://ist By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. To allow Istio to receive external traffic, you need to enable the Istio ingress gateway for the cluster. The result is that your cluster will have two ingresses. -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) \ No newline at end of file +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.](/img/istio-ingress.svg) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/k8s-tutorials.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md similarity index 100% rename from versioned_docs/version-2.0-2.4/installation/resources/k8s-tutorials/k8s-tutorials.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-provisioning.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md similarity index 75% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-provisioning.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md index 
155b729aeb8..b0c24c2d79a 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/cluster-provisioning.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -10,9 +10,9 @@ aliases: Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. -This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.0-v2.4/en/overview/concepts) page. +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. -For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.0-v2.4/en/overview/architecture/) page. +For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture](rancher-manager-architecture.md) page. This section covers the following topics: @@ -28,7 +28,7 @@ This section covers the following topics: The following table summarizes the options and settings available for each cluster type: -import ClusterCapabilitiesTable from '/rancher/v2.0-v2.4/en/shared-files/_cluster-capabilities-table.md'; +import ClusterCapabilitiesTable from 'shared-files/_cluster-capabilities-table.md'; @@ -38,11 +38,11 @@ In this scenario, Rancher does not provision Kubernetes because it is installed If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. 
-For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters) +For more information, refer to the section on [hosted Kubernetes clusters.](set-up-clusters-from-hosted-kubernetes-providers.md) # Launching Kubernetes with Rancher -Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. +Rancher uses the [Rancher Kubernetes Engine (RKE)](https://rancher.com/docs/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. @@ -50,23 +50,23 @@ These nodes can be dynamically provisioned through Rancher's UI, which calls [Do If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. -For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) +For more information, refer to the section on [RKE clusters.](launch-kubernetes-with-rancher.md) ### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). 
This template defines the parameters used to launch nodes in your cloud providers. One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. -The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. +The cloud providers available for creating a node template are decided based on the [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers) active in the Rancher UI. -For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) +For more information, refer to the section on [nodes hosted by an infrastructure provider](use-new-nodes-in-an-infra-provider.md) ### Launching Kubernetes on Existing Custom Nodes -When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. +When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,](use-existing-nodes.md) which creates a custom cluster. You can bring any nodes you want to Rancher and use them to create a cluster. @@ -84,17 +84,17 @@ For all imported Kubernetes clusters except for K3s clusters, the configuration In Rancher v2.4, it became possible to import a K3s cluster and upgrade Kubernetes by editing the cluster in the Rancher UI. 
-For more information, refer to the section on [importing existing clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) +For more information, refer to the section on [importing existing clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) ### Importing and Editing K3s Clusters _Available as of Rancher v2.4.0_ -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. K3s Kubernetes clusters can now be imported into Rancher. +[K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. K3s Kubernetes clusters can now be imported into Rancher. When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - The ability to upgrade the K3s version - The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. 
-For more information, refer to the section on [imported K3s clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) \ No newline at end of file +For more information, refer to the section on [imported K3s clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md new file mode 100644 index 00000000000..4fc8354908f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md @@ -0,0 +1,18 @@ +--- +title: Kubernetes Components +weight: 100 +--- + +The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](launch-kubernetes-with-rancher.md) clusters. + +This section includes troubleshooting tips in the following categories: + +- [Troubleshooting etcd Nodes](../troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md) +- [Troubleshooting Controlplane Nodes](../troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md) +- [Troubleshooting nginx-proxy Nodes](../troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md) +- [Troubleshooting Worker Nodes and Generic Components](../troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md) + +# Kubernetes Component Diagram + +![Cluster diagram](/img/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md new file mode 100644 index 00000000000..1649faf19fc --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md @@ -0,0 +1,76 @@ +--- +title: Kubernetes Resources +weight: 19 +aliases: + - /rancher/v2.0-v2.4/en/concepts/ + - /rancher/v2.0-v2.4/en/tasks/ + - /rancher/v2.0-v2.4/en/concepts/resources/ +--- + +## Workloads + +Deploy applications to your cluster nodes using [workloads](workloads-and-pods.md), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. + +When deploying a workload, you can deploy from any image. There are a variety of [workload types](workloads-and-pods.md#workload-types) to choose from which determine how your application should run. + +Following a workload deployment, you can continue working with it. You can: + +- [Upgrade](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) the workload to a newer version of the application it's running. +- [Roll back](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) a workload to a previous version, if an issue occurs during upgrade. +- [Add a sidecar](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md), which is a workload that supports a primary workload. + +## Load Balancing and Ingress + +### Load Balancers + +After you launch an application, it's only available within the cluster. It can't be reached externally. 
+
+If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number.
+
+Rancher supports two types of load balancers:
+
+- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer)
+- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer)
+
+For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md).
+
+#### Ingress
+
+Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiple load balancers can be expensive. You can get around this issue by using an ingress.
+
+Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured.
+
+For more information, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md).
+
+When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry.
+
+For more information, see [Global DNS](../how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md).
+
+## Service Discovery
+
+After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolvable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname.
+
+For more information, see [Service Discovery](../how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md).
+
+## Pipelines
+
+After your project has been [configured to a version control provider](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository.
+
+For more information, see [Pipelines](k8s-in-rancher/pipelines/).
+
+## Applications
+
+Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts.
+
+For more information, see [Applications in a Project](catalog/apps/).
+
+## Kubernetes Resources
+
+Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well.
+
+Resources include:
+
+- [Certificates](../how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md): Files used to encrypt/decrypt data entering or leaving the cluster.
+- [ConfigMaps](../how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md): Files that store general configuration information, such as a group of config files. +- [Secrets](../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md): Files that store sensitive data like passwords, tokens, or keys. +- [Registries](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md): Files that carry credentials used to authenticate with private registries. diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/rke-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md similarity index 61% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/rke-clusters.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md index 507a4524b72..42f4d9b31d8 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/rke-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -3,7 +3,7 @@ title: Launching Kubernetes with Rancher weight: 4 --- -You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: +You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. 
It can launch Kubernetes on any computers, including: - Bare-metal servers - On-premise virtual machines @@ -15,20 +15,20 @@ RKE clusters include clusters that Rancher launched on Windows nodes or other ex ### Requirements -If you use RKE to set up a cluster, your nodes must meet the [requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements) for nodes in downstream user clusters. +If you use RKE to set up a cluster, your nodes must meet the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) for nodes in downstream user clusters. ### Launching Kubernetes on New Nodes in an Infrastructure Provider -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. -For more information, refer to the section on [launching Kubernetes on new nodes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) +For more information, refer to the section on [launching Kubernetes on new nodes.](use-new-nodes-in-an-infra-provider.md) ### Launching Kubernetes on Existing Custom Nodes In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. 
With this option, you will run a Rancher agent Docker container on the machine. -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.0-v2.4/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. +If you want to reuse a node from a previous custom cluster, [clean the node](admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. -For more information, refer to the section on [custom nodes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) +For more information, refer to the section on [custom nodes.](use-existing-nodes.md) diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/load-balancers-and-ingress.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md similarity index 77% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/load-balancers-and-ingress.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md index 5c18feeec1c..09c4a898ac2 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/load-balancers-and-ingress/load-balancers-and-ingress.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -16,10 +16,10 @@ If you want your applications to be externally accessible, you must add a load b Rancher supports two types of load balancers: -- [Layer-4 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) +- [Layer-4 Load 
Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) +- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer) -For more information, see [load balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). +For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md). ### Load Balancer Limitations @@ -30,9 +30,9 @@ Load Balancers have a couple of limitations you should be aware of: - If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: - - [Support for Layer-4 Load Balancing]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) + - [Support for Layer-4 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-4-load-balancing) - - [Support for Layer-7 Load Balancing]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) + - [Support for Layer-7 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-7-load-balancing) ## Ingress @@ -58,6 +58,6 @@ Ingress can provide other functionality as well, such as SSL termination, name-b > >Refrain from adding an Ingress to the 
`local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. -- For more information on how to set up ingress in Rancher, see [Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress). +- For more information on how to set up ingress in Rancher, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). - For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). +- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS](../how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md). 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-admin.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md similarity index 67% rename from versioned_docs/version-2.0-2.4/cluster-admin/cluster-admin.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md index 63f8f4bebfb..293fd1bd4bf 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/cluster-admin.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md @@ -11,7 +11,7 @@ This page covers the following topics: - [Managing clusters in Rancher](#managing-clusters-in-rancher) - [Configuring tools](#configuring-tools) -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.0-v2.4/en/overview/concepts) page. +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. ## Switching between Clusters @@ -21,9 +21,9 @@ Alternatively, you can switch between projects and clusters directly in the navi ## Managing Clusters in Rancher -After clusters have been [provisioned into Rancher]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. +After clusters have been [provisioned into Rancher](kubernetes-clusters-in-rancher-setup.md), [cluster owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. 
-import ClusterCapabilitiesTable from '/rancher/v2.0-v2.4/en/shared-files/_cluster-capabilities-table.md'; +import ClusterCapabilitiesTable from 'shared-files/_cluster-capabilities-table.md'; @@ -38,4 +38,4 @@ Rancher contains a variety of tools that aren't included in Kubernetes to assist - Istio Service Mesh - OPA Gatekeeper -For more information, see [Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) +For more information, see [Tools](../reference-guides/rancher-cluster-tools.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/resource-quotas.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md similarity index 84% rename from versioned_docs/version-2.0-2.4/project-admin/resource-quotas/resource-quotas.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md index af27d4b3457..d3a6dd7b624 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/resource-quotas/resource-quotas.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md @@ -11,15 +11,15 @@ In situations where several teams share a cluster, one team may overconsume the This page is a how-to guide for creating resource quotas in existing projects. -Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/#creating-projects) +Resource quotas can also be set when a new project is created. 
For details, refer to the section on [creating new projects.](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md#creating-projects) -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md) ### Applying Resource Quotas to Existing Projects _Available as of v2.0.1_ -Edit [resource quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: +Edit [resource quotas](k8s-in-rancher/projects-and-namespaces/resource-quotas) when: - You want to limit the resources that a project and its namespaces can use. - You want to scale the resources available to a project up or down when a research quota is already in effect. @@ -32,7 +32,7 @@ Edit [resource quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/project 1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. -1. Select a Resource Type. For more information on types, see the [quota type reference.](./quota-type-reference) +1. Select a Resource Type. For more information on types, see the [quota type reference.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md) 1. 
Enter values for the **Project Limit** and the **Namespace Default Limit**. diff --git a/versioned_docs/version-2.0-2.4/project-admin/project-admin.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md similarity index 53% rename from versioned_docs/version-2.0-2.4/project-admin/project-admin.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md index a0a86c22d04..63bb5b0ac91 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/project-admin.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md @@ -19,19 +19,19 @@ Rancher projects resolve this issue by allowing you to apply resources and acces You can use projects to perform actions like: -- [Assign users access to a group of namespaces]({{}}/rancher/v2.0-v2.4/en/project-admin/project-members) -- Assign users [specific roles in a project]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/) -- [Set resource quotas]({{}}/rancher/v2.0-v2.4/en/project-admin/resource-quotas/) -- [Manage namespaces]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces/) -- [Configure tools]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/) -- [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines) -- [Configure pod security policies]({{}}/rancher/v2.0-v2.4/en/project-admin/pod-security-policies) +- [Assign users access to a group of namespaces](../how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md) +- Assign users [specific roles in a project](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). 
A role can be owner, member, read-only, or [custom](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md) +- [Set resource quotas](manage-project-resource-quotas.md) +- [Manage namespaces](../how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md) +- [Configure tools](project-tools.md) +- [Set up pipelines for continuous integration and deployment](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- [Configure pod security policies](../how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md) ### Authorization -Non-administrative users are only authorized for project access after an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. +Non-administrative users are only authorized for project access after an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owner or member](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) adds them to the project's **Members** tab. -Whoever creates the project automatically becomes a [project owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles). 
+Whoever creates the project automatically becomes a [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). ## Switching between Projects diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rbac/rbac.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md similarity index 72% rename from versioned_docs/version-2.0-2.4/admin-settings/rbac/rbac.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md index f563c614921..c8afca26eba 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/rbac/rbac.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -5,7 +5,7 @@ aliases: - /rancher/v2.0-v2.4/en/concepts/global-configuration/users-permissions-roles/ --- -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/), users can either be local or external. +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](about-authentication.md), users can either be local or external. After you configure external authentication, the users that display on the **Users** page changes. @@ -17,11 +17,11 @@ After you configure external authentication, the users that display on the **Use Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. 
-- [Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/): +- [Global Permissions](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md): Define user authorization outside the scope of any particular cluster. -- [Cluster and Project Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/): +- [Cluster and Project Roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md): Define user authorization inside the specific cluster or project where they are assigned the role. diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/v1.6-migration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/migrate-from-v1.6-v2.x.md similarity index 69% rename from versioned_docs/version-2.0-2.4/v1.6-migration/v1.6-migration.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/migrate-from-v1.6-v2.x.md index 0d26be4f72c..186cc30e795 100644 --- a/versioned_docs/version-2.0-2.4/v1.6-migration/v1.6-migration.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/migrate-from-v1.6-v2.x.md @@ -15,20 +15,20 @@ This video demonstrates a complete walk through of migration from Rancher v1.6 t ## Migration Plan ->**Want to more about Kubernetes before getting started?** Read our [Kubernetes Introduction]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro). +>**Want to know more about Kubernetes before getting started?** Read our [Kubernetes Introduction](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md). -- [1. Get Started]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/get-started) +- [1.
Get Started](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md) >**Already a Kubernetes user in v1.6?** > > _Get Started_ is the only section you need to review for migration to v2.x. You can skip everything else. -- [2. Migrate Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/) -- [3. Expose Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/) -- [4. Configure Health Checks]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps) -- [5. Schedule Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/) -- [6. Service Discovery]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/) -- [7. Load Balancing]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/) +- [2. Migrate Your Services](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md) +- [3. Expose Your Services](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md) +- [4. Configure Health Checks](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md) +- [5. Schedule Your Services](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md) +- [6. Service Discovery](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md) +- [7. Load Balancing](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md) ## Migration Example Files @@ -50,4 +50,4 @@ During migration, we'll export these services from Rancher v1.6. The export gen A file for Rancher-specific functionality such as health checks and load balancers. These files cannot be read by Rancher v2.x, so don't worry about their contents—we're discarding them and recreating them using the v2.x UI. 
-### [Next: Get Started]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/get-started) +### [Next: Get Started](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/other-installation-methods.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md similarity index 57% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/other-installation-methods.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md index bc79e37d9a3..979c1de293f 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/other-installation-methods.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md @@ -5,13 +5,13 @@ weight: 3 ### Air Gapped Installations 
-Follow [these steps]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. ### Docker Installations -The [single-node Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users who want to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. The Docker installation is for development and testing environments only. 
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pipelines/pipelines.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/pipelines.md similarity index 92% rename from versioned_docs/version-2.0-2.4/pipelines/pipelines.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/pipelines.md index f4c76b6f3ab..357108d130b 100644 --- a/versioned_docs/version-2.0-2.4/pipelines/pipelines.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/pipelines.md @@ -23,7 +23,7 @@ After configuring Rancher and GitHub, you can deploy containers running Jenkins >**Notes:** > >- Pipelines improved in Rancher v2.1. Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? See the pipeline documentation for [previous versions]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/docs-for-v2.0.x). +>- Still using v2.0.x? See the pipeline documentation for [previous versions](k8s-in-rancher/pipelines/docs-for-v2.0.x). >- Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of and is not a replacement of enterprise-grade Jenkins or other CI tools your team uses. 
This section covers the following topics: @@ -42,7 +42,7 @@ This section covers the following topics: # Concepts -For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/concepts) +For an explanation of concepts and terminology used in this section, refer to [this page.](k8s-in-rancher/pipelines/concepts) # How Pipelines Work @@ -50,7 +50,7 @@ After enabling the ability to use pipelines in a project, you can configure mult A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. -Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments. +Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories](k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments. When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: @@ -68,13 +68,13 @@ When you configure a pipeline in one of your projects, a namespace specifically Minio storage is used to store the logs for pipeline executions. - >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. 
If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/storage). + >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components](k8s-in-rancher/pipelines/storage). # Roles-based Access Control for Pipelines If you can access a project, you can enable repositories to start building pipelines. -Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. +Only [administrators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure version control providers and manage global pipeline execution settings. Project members can only configure repositories and pipelines. @@ -210,7 +210,7 @@ Now that repositories are added to your project, you can start configuring the p 1. 
Find the repository that you want to set up a pipeline for. -1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config) +1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.](k8s-in-rancher/pipelines/config) * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. 
When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. @@ -230,7 +230,7 @@ Now that repositories are added to your project, you can start configuring the p # Pipeline Configuration Reference -Refer to [this page]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: +Refer to [this page](k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: - Run a script - Build and publish images @@ -269,7 +269,7 @@ Available Events: * **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. * **Tag**: When a tag is created in the repository, the pipeline is triggered. -> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example-repos/). +> **Note:** This option doesn't exist for Rancher's [example repositories](k8s-in-rancher/pipelines/example-repos/). ### Modifying the Event Triggers for the Repository diff --git a/versioned_docs/version-2.0-2.4/project-admin/tools/tools.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md similarity index 57% rename from versioned_docs/version-2.0-2.4/project-admin/tools/tools.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md index c1adfb7bc85..10d9011b841 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/tools/tools.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md @@ -15,13 +15,13 @@ Rancher contains a variety of tools that aren't included in Kubernetes to assist # Notifiers -[Notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. 
+[Notifiers](../explanations/integrations-in-rancher/notifiers.md) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. # Alerts -[Alerts]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts) are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. +[Alerts](cluster-admin/tools/alerts) are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level. -For details on project-level alerts, see [this page.](./project-alerts) +For details on project-level alerts, see [this page.](../reference-guides/rancher-project-tools/project-alerts.md) # Logging @@ -35,12 +35,12 @@ Logging is helpful because it allows you to: Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. -For details on setting up logging at the cluster level, refer to the [logging section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging) +For details on setting up logging at the cluster level, refer to the [logging section.](cluster-admin/tools/logging) -For details on project-level logging, see [this section.](./project-logging) +For details on project-level logging, see [this section.](../reference-guides/rancher-project-tools/project-logging.md) # Monitoring _Available as of v2.2.0_ -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. 
For details, refer to the [monitoring section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring) +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.](cluster-monitoring.md) diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/examples.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md similarity index 57% rename from versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/examples.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md index 491d3728cf3..1aa159e456b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/volumes-and-storage/examples/examples.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md @@ -10,6 +10,6 @@ Rancher supports persistent storage with a variety of volume plugins. 
However, b For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: -- [NFS](./nfs) -- [vSphere](./vsphere) -- [EBS](./ebs) +- [NFS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) +- [vSphere](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) +- [EBS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/quick-start-guide.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md similarity index 57% rename from versioned_docs/version-2.0-2.4/quick-start-guide/quick-start-guide.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md index 5491d63787e..ab4d7ab47a1 100644 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/quick-start-guide.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md @@ -4,14 +4,14 @@ metaDescription: Use this section to jump start your Rancher deployment and test short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. weight: 2 --- ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). Howdy buckaroos! 
Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. We have Quick Start Guides for: -- [Deploying Rancher Server]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. +- [Deploying Rancher Server](deploy-rancher-manager.md): Get started running Rancher using the method most convenient for you. -- [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload/): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. +- [Deploying Workloads](deploy-rancher-workloads.md): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. -- [Using the CLI]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/cli/): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. +- [Using the CLI](../getting-started/quick-start-guides/cli.md): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. 
diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/behind-proxy.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md similarity index 50% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/behind-proxy.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md index a40a5a02db0..a2dc3b1edaf 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/behind-proxy/behind-proxy.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -5,10 +5,10 @@ weight: 4 In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. -Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/). +Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). # Installation Outline -1. [Set up infrastructure]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/prepare-nodes/) -2. [Set up a Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) -3. [Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/install-rancher/) +1. [Set up infrastructure](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md) +2. 
[Set up a Kubernetes cluster](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md) +3. [Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/overview/architecture/architecture.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md similarity index 84% rename from versioned_docs/version-2.0-2.4/overview/architecture/architecture.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md index 8b7b065600c..46216f2e10c 100644 --- a/versioned_docs/version-2.0-2.4/overview/architecture/architecture.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md @@ -5,13 +5,13 @@ weight: 1 This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. 
-For information on the different ways that Rancher can be installed, refer to the [overview of installation options.]({{}}/rancher/v2.0-v2.4/en/installation/#overview-of-installation-options) +For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) -For a list of main features of the Rancher API server, refer to the [overview section.]({{}}/rancher/v2.0-v2.4/en/overview/#features-of-the-rancher-api-server) +For a list of main features of the Rancher API server, refer to the [overview section.](../getting-started/introduction/overview.md#features-of-the-rancher-api-server) -For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations) +For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.0-v2.4/en/overview/concepts) page. +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. This section covers the following topics: @@ -31,13 +31,13 @@ The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). 
-For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. -The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy: +The diagram below shows how users can manipulate both [Rancher-launched Kubernetes](launch-kubernetes-with-rancher.md) clusters and [hosted Kubernetes](set-up-clusters-from-hosted-kubernetes-providers.md) clusters through Rancher's authentication proxy:
Managing Kubernetes Clusters through Rancher's Authentication Proxy
-![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) +![Architecture](/img/rancher-architecture-rancher-api-server.svg) You can install Rancher on a single node, or on a high-availability Kubernetes cluster. @@ -57,7 +57,7 @@ The below diagram shows how the cluster controllers, cluster agents, and node ag
Communicating with Downstream Clusters
-![Rancher Components]({{}}/img/rancher/rancher-architecture-cluster-controller.svg) +![Rancher Components](/img/rancher-architecture-cluster-controller.svg) The following descriptions correspond to the numbers in the diagram above: @@ -75,7 +75,7 @@ The authentication proxy forwards all Kubernetes API calls to downstream cluster Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. -By default, Rancher generates a [kubeconfig file]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. +By default, Rancher generates a [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. ### 2. Cluster Controllers and Cluster Agents @@ -107,7 +107,7 @@ The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/do An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. 
+> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](launch-kubernetes-with-rancher.md) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. There are two main reasons why a user might need the authorized cluster endpoint: @@ -122,7 +122,7 @@ Like the authorized cluster endpoint, the `kube-api-auth` authentication service With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. -You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl) +You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) # Important Files @@ -134,7 +134,7 @@ The files mentioned below are needed to maintain, troubleshoot and upgrade your > **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. 
-For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/) documentation. +For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) documentation. # Tools for Provisioning Kubernetes Clusters @@ -166,7 +166,7 @@ In this type of cluster, Rancher connects to a Kubernetes cluster that has alrea This diagram shows each component that the Rancher server is composed of: -![Rancher Components]({{}}/img/rancher/rancher-architecture-rancher-components.svg) +![Rancher Components](/img/rancher-architecture-rancher-components.svg) The GitHub repositories for Rancher can be found at the following links: @@ -178,4 +178,4 @@ The GitHub repositories for Rancher can be found at the following links: - [Rancher CLI](https://github.com/rancher/cli) - [Catalog applications](https://github.com/rancher/helm) -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.]({{}}/rancher/v2.0-v2.4/en/contributing/#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-docker.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md similarity index 81% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-docker.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md index 22dab597f2d..365317df2cd 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/single-node-docker.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -13,7 +13,7 @@ Rancher can be installed by running a single Docker container. In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. > **Want to use an external load balancer?** -> See [Docker Install with an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/single-node-install-external-lb) instead. +> See [Docker Install with an External Load Balancer](installation/options/single-node-install-external-lb) instead. A Docker installation of Rancher is recommended only for development and testing purposes. @@ -21,11 +21,11 @@ For Rancher v2.0-v2.4, there is no migration path from a Docker installation to # Requirements for OS, Docker, Hardware, and Networking -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) +Make sure that your node fulfills the general [installation requirements.](installation-requirements.md) # 1. Provision Linux Host -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements) to launch your Rancher server. 
+Provision a single Linux host according to our [Requirements](installation-requirements.md) to launch your Rancher server. # 2. Choose an SSL Option and Install Rancher @@ -33,10 +33,10 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher > **Do you want to...** > -> - Use a proxy? See [HTTP Proxy Configuration]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/) -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) -> - Complete an Air Gap Installation? See [Air Gap: Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/) -> - Record all transactions with the Rancher API? See [API Auditing](./advanced/#api-audit-log) +> - Use a proxy? See [HTTP Proxy Configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) +> - Complete an Air Gap Installation? See [Air Gap: Docker Install](installation/air-gap-single-node/) +> - Record all transactions with the Rancher API? See [API Auditing](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) Choose from the following options: @@ -65,7 +65,7 @@ In development or testing environments where your team will access your Rancher > Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. > > - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain.
Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. @@ -92,7 +92,7 @@ In production environments where you're exposing an app publicly, use a certific > **Prerequisites:** > > - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) +> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) After obtaining your certificate, run the Docker command below. @@ -151,13 +151,13 @@ When installing Rancher on a single node with Docker, there are several advanced - Persistent Data - Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node -Refer to [this page](./advanced) for details.
+Refer to [this page](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for details. ## Troubleshooting -Refer to [this page](./troubleshooting) for frequently asked questions and troubleshooting tips. +Refer to [this page](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) for frequently asked questions and troubleshooting tips. ## What's Next? -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). +- **Recommended:** Review [Single Node Backup and Restore](installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](kubernetes-clusters-in-rancher-setup.md). diff --git a/versioned_docs/version-2.0-2.4/security/security.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md similarity index 70% rename from versioned_docs/version-2.0-2.4/security/security.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md index 876ac3e6e4f..b173a21deca 100644 --- a/versioned_docs/version-2.0-2.4/security/security.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md @@ -20,7 +20,7 @@ weight: 20 -Security is at the heart of all Rancher features. 
From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac) Rancher makes your Kubernetes clusters even more secure. +Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,](manage-role-based-access-control-rbac.md) Rancher makes your Kubernetes clusters even more secure. On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: @@ -46,7 +46,7 @@ The Benchmark provides recommendations of two types: Scored and Not Scored. We r When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. -For details, refer to the section on [security scans.]({{}}/rancher/v2.0-v2.4/en/cis-scans) +For details, refer to the section on [security scans.](cis-scans) ### Rancher Hardening Guide @@ -60,12 +60,12 @@ Each version of the hardening guide is intended to be used with specific version Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ -[Hardening Guide v2.4]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.4/) | Rancher v2.4 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.5]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.3.5/) | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.3]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 -[Hardening Guide v2.3]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 
-[Hardening Guide v2.2]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 -[Hardening Guide v2.1]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 +[Hardening Guide v2.4](security/hardening-2.4/) | Rancher v2.4 | Benchmark v1.5 | Kubernetes v1.15 +[Hardening Guide v2.3.5](security/hardening-2.3.5/) | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes v1.15 +[Hardening Guide v2.3.3](security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 +[Hardening Guide v2.3](security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 +[Hardening Guide v2.2](security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 +[Hardening Guide v2.1](security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 ### The CIS Benchmark and Self-Assessment @@ -77,12 +77,12 @@ Each version of Rancher's self-assessment guide corresponds to specific versions Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- -[Self Assessment Guide v2.4]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.4/#cis-kubernetes-benchmark-1-5-0-rancher-2-4-with-kubernetes-1-15) | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.5]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.5/#cis-kubernetes-benchmark-1-5-0-rancher-2-3-5-with-kubernetes-1-15) | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.3]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 -[Self Assessment Guide 
v2.3]({{}}/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 -[Self Assessment Guide v2.2]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 -[Self Assessment Guide v2.1]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 +[Self Assessment Guide v2.4](security/benchmark-2.4/#cis-kubernetes-benchmark-1-5-0-rancher-2-4-with-kubernetes-1-15) | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 +[Self Assessment Guide v2.3.5](security/benchmark-2.3.5/#cis-kubernetes-benchmark-1-5-0-rancher-2-3-5-with-kubernetes-1-15) | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 +[Self Assessment Guide v2.3.3](security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 +[Self Assessment Guide v2.3](../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 +[Self Assessment Guide v2.2](security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 +[Self Assessment Guide v2.1](security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 ### Third-party Penetration Test Reports @@ -95,4 +95,4 @@ Results: ### Rancher CVEs and Resolutions -Rancher is committed to informing the community of security issues in our products. 
For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](./cve) +Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](../reference-guides/rancher-security/security-advisories-and-cves.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.1/rancher-2.1.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.1-hardening-guides.md similarity index 58% rename from versioned_docs/version-2.0-2.4/security/rancher-2.1/rancher-2.1.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.1-hardening-guides.md index 8e443a49919..232d2a371a6 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.1/rancher-2.1.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.1-hardening-guides.md @@ -7,7 +7,7 @@ aliases: ### Self Assessment Guide -This [guide](./benchmark-2.1) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: +This [guide](../reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- @@ -15,7 +15,7 @@ Self Assessment Guide v2.1 | Rancher v2.1.x | 
Hardening Guide v2.1 | Kubernetes ### Hardening Guide -This hardening [guide](./hardening-2.1) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: +This hardening [guide](../reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.2/rancher-2.2.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.2-hardening-guides.md similarity index 59% rename from versioned_docs/version-2.0-2.4/security/rancher-2.2/rancher-2.2.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.2-hardening-guides.md index a485c707358..1b955de4eb1 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.2/rancher-2.2.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.2-hardening-guides.md @@ -7,7 +7,7 @@ aliases: ### Self Assessment Guide -This [guide](./benchmark-2.2) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: +This [guide](../reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- @@ -15,7 +15,7 @@ Self Assessment Guide v2.2 | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes ### Hardening Guide -This hardening [guide](./hardening-2.2) is intended to be used with specific versions of the CIS Kubernetes 
Benchmark, Kubernetes, and Rancher: +This hardening [guide](../reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-2.3.x.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.3-hardening-guides.md similarity index 51% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-2.3.x.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.3-hardening-guides.md index e50a8c2f175..4d97e16bf6c 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-2.3.x.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.3-hardening-guides.md @@ -7,6 +7,6 @@ aliases: The relevant Hardening Guide and Self Assessment guide depends on your Rancher version: -- [Rancher v2.3.5](./rancher-v2.3.5) -- [Rancher v2.3.3](./rancher-v2.3.3) -- [Rancher v2.3.0](./rancher-v2.3.0) \ No newline at end of file +- [Rancher v2.3.5](security/rancher-2.3.x/rancher-v2.3.5) +- [Rancher v2.3.3](security/rancher-2.3.x/rancher-v2.3.3) +- [Rancher v2.3.0](security/rancher-2.3.x/rancher-v2.3.0) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.4/rancher-2.4.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.4-hardening-guides.md similarity index 58% rename from versioned_docs/version-2.0-2.4/security/rancher-2.4/rancher-2.4.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.4-hardening-guides.md index 137759fe75b..d15db3271d2 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.4/rancher-2.4.md +++ 
b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.4-hardening-guides.md @@ -7,7 +7,7 @@ aliases: ### Self Assessment Guide -This [guide](./benchmark-2.4) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: +This [guide](../reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- @@ -15,7 +15,7 @@ Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1 ### Hardening Guide -This hardening [guide](./hardening-2.4) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: +This hardening [guide](../reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md new file mode 100644 index 00000000000..24432dfc544 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md @@ -0,0 +1,30 @@ +--- +title: Resources +weight: 5 +aliases: +- /rancher/v2.0-v2.4/en/installation/options +--- + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. 
Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +### Air Gapped Installations + +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Advanced Options + +When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options: + +| Advanced Option | Available as of | +| ----------------------------------------------------------------------------------------------------------------------- | --------------- | +| [Custom CA Certificate](installation/options/custom-ca-root-certificate/) | v2.0.0 | +| [API Audit Log](installation/options/api-audit-log/) | v2.0.0 | +| [TLS Settings](installation/options/tls-settings/) | v2.1.7 | +| [etcd configuration](installation/options/etcd/) | v2.2.0 | +| [Local System Charts for Air Gap Installations](installation/options/local-system-charts) | v2.3.0 | diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/cloud-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md similarity index 60% rename from 
versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/cloud-providers.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md index 586c827112a..15fa601180a 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/cloud-providers/cloud-providers.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md @@ -22,25 +22,25 @@ The following cloud providers can be enabled: ### Setting up the Amazon Cloud Provider -For details on enabling the Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon) +For details on enabling the Amazon cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) ### Setting up the Azure Cloud Provider -For details on enabling the Azure cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/azure) +For details on enabling the Azure cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md) ### Setting up the GCE Cloud Provider -For details on enabling the Google Compute Engine cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/gce) +For details on enabling the Google Compute Engine cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md) ### Setting up the vSphere Cloud Provider -For details on enabling the vSphere cloud provider, refer to [this page.](./vsphere) +For details on enabling the vSphere cloud provider, refer to 
* [vSphere](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/)
b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -24,9 +24,9 @@ Kubernetes Providers | Available as of | When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: -- [Creating a GKE Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/gke) -- [Creating an EKS Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks) -- [Creating an AKS Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/aks) -- [Creating an ACK Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack) -- [Creating a TKE Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke) -- [Creating a CCE Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce) +- [Creating a GKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +- [Creating an EKS Cluster](../reference-guides/installation-references/amazon-eks-permissions.md) +- [Creating an AKS Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) +- [Creating an ACK Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +- [Creating a TKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) +- [Creating a CCE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) diff --git 
a/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/upgrades.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/upgrades.md similarity index 78% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/upgrades.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/upgrades.md index 6a6550ed8dc..4afaf46cfb5 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/upgrades/upgrades.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/upgrades.md @@ -22,11 +22,11 @@ import TabItem from '@theme/TabItem'; The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. -For the instructions to upgrade Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) +For the instructions to upgrade Rancher installed with Docker, refer to [this page.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md) -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. 
+Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories) aren't supported.
[Helm 2 upgrade page here](installation/upgrades-rollbacks/upgrades/ha/helm2) provides a copy
### For upgrades from v2.0-v2.2 with external TLS termination -If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) +If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.](../reference-guides/installation-references/helm-chart-options.md#configuring-ingress-for-external-tls-when-using-nginx-v0-25) ### For upgrades with cert-manager older than 0.8.0 -[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager) +[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.](installation/options/upgrading-cert-manager) # Upgrade Outline @@ -81,7 +81,7 @@ Follow the steps to upgrade Rancher server: # 1. Back up Your Kubernetes Cluster that is Running Rancher Server -[Take a one-time snapshot]({{}}/rancher/v2.0-v2.4/en/backups/backup/rke-backups/#option-b-one-time-snapshots) +[Take a one-time snapshot](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md#option-b-one-time-snapshots) of your Kubernetes cluster running Rancher server. You'll use the backup as a restoration point if something goes wrong during upgrade. 
@@ -96,7 +96,7 @@ You'll use the backup as a restoration point if something goes wrong during upgr 1. Get the repository name that you used to install Rancher. - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). + For information about the repos and their differences, see [Helm Chart Repositories](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). {{< release-channel >}} @@ -108,7 +108,7 @@ You'll use the backup as a restoration point if something goes wrong during upgr rancher- https://releases.rancher.com/server-charts/ ``` - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. 1. Fetch the latest chart to install Rancher from the Helm chart repository. @@ -180,7 +180,7 @@ If you are currently running the cert-manager whose version is older than v0.11, helm delete rancher -n cattle-system ``` -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager) page. +2. 
Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager](installation/options/upgrading-cert-manager) page. 3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. @@ -265,7 +265,7 @@ Log into Rancher to confirm that the upgrade succeeded. >**Having network issues following upgrade?** > -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). +> See [Restoring Cluster Networking](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md#restoring-cluster-networking). # Known Upgrade Issues @@ -275,14 +275,14 @@ Upgrade Scenario | Issue ---|--- Upgrading to v2.4.6 or v2.4.7 | These Rancher versions had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. Upgrading to v2.3.0+ | Any user provisioned cluster will be automatically updated upon any edit as tolerations were added to the images used for Kubernetes provisioning. -Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/). 
-Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. -Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration) +Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts](../getting-started/installation-and-upgrade/resources/local-system-charts.md). +Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps](../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. +Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. 
Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md) # RKE Add-on Installs **Important: RKE add-on install is only supported up to Rancher v2.0.8** -Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). +Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install](install-upgrade-on-a-kubernetes-cluster.md). -If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. +If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/custom-nodes/custom-nodes.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md similarity index 70% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/custom-nodes/custom-nodes.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md index 65e38b64310..6df90aa6059 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/custom-nodes/custom-nodes.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md @@ -10,7 +10,7 @@ aliases: When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. 
node-requirements-for-rancher-managed-clusters.md), which includes some hardware specifications and Docker. After you install Docker on each server, you will also run the command
-Provision the host according to the [installation requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements) and the [checklist for production-ready clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production) +Provision the host according to the [installation requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) and the [checklist for production-ready clusters.](checklist-for-production-ready-clusters.md) ### 2. Create the Custom Cluster @@ -54,7 +54,7 @@ Provision the host according to the [installation requirements]({{}}/ra >**Using Windows nodes as Kubernetes workers?** > - >- See [Enable the Windows Support Option]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/). + >- See [Enable the Windows Support Option](use-windows-clusters.md). >- The only Network Provider available for clusters with Windows support is Flannel. 6. Click **Next**. @@ -62,10 +62,10 @@ Provision the host according to the [installation requirements]({{}}/ra >**Notes:** > - >- Using Windows nodes as Kubernetes workers? See [this section]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/). + >- Using Windows nodes as Kubernetes workers? See [this section](use-windows-clusters.md). >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). -8. **Optional**: Click **[Show advanced options]({{}}/rancher/v2.0-v2.4/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. +8. 
**Optional**: Click **[Show advanced options](admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. 9. Copy the command displayed on screen to your clipboard. @@ -122,5 +122,5 @@ Key=kubernetes.io/cluster/CLUSTERID, Value=shared After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. 
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/node-pools.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md similarity index 87% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/node-pools.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md index 89778a7738f..14550b9601f 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/node-pools.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -5,11 +5,11 @@ aliases: - /rancher/v2.0-v2.4/en/concepts/global-configuration/node-templates/ --- -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). 
This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. -The available cloud providers to create a node template are decided based on active [node drivers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). +The available cloud providers to create a node template are decided based on active [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers). This section covers the following topics: @@ -136,10 +136,10 @@ Node templates can use cloud credentials to store credentials for launching node - Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. -> **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. +> **Note:** As of v2.2.0, the default `active` [node drivers](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) and any node driver, that has fields marked as `password`, are required to use cloud credentials. 
If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. -After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/). +After cloud credentials are created, the user can start [managing the cloud credentials that they created](../reference-guides/user-settings/manage-cloud-credentials.md). # Node Drivers -If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers). +If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#activating-deactivating-node-drivers), or you can [add your own custom node driver](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#adding-custom-node-drivers). 
diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/windows-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md similarity index 81% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/windows-clusters.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md index db83f053eb3..4f28114367b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/windows-clusters/windows-clusters.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md @@ -5,7 +5,7 @@ weight: 2240 _Available as of v2.3.0_ -When provisioning a [custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. +When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. @@ -31,7 +31,7 @@ This guide covers the following topics: # Requirements for Windows Clusters -The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). +The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation](installation-requirements.md). 
### OS and Docker Requirements @@ -61,13 +61,13 @@ Rancher will not provision the node if the node does not meet these requirements ### Networking Requirements -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{}}/rancher/v2.0-v2.4/en/installation/) before proceeding with this guide. +Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation](installation-and-upgrade.md) before proceeding with this guide. Rancher only supports Windows using Flannel as the network provider. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. -For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. 
+For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. @@ -97,18 +97,18 @@ Windows requires that containers must be built on the same Windows Server versio ### Cloud Provider Specific Requirements -If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider cluster of nodes that meet the prerequisites. +If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster.
Refer to [this page](set-up-cloud-providers.md) for details on how to configure a cloud provider cluster of nodes that meet the prerequisites. If you are using the GCE (Google Compute Engine) cloud provider, you must do the following: -- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/gce) +- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md) - When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI. # Tutorial: How to Create a Cluster with Windows Support This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) -When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. +When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below.
@@ -142,11 +142,11 @@ You will provision three nodes: | Node 2 | Linux (Ubuntu Server 18.04 recommended) | | Node 3 | Windows (Windows Server core version 1809 or above required) | -If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) +If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.](set-up-cloud-providers.md) # 2. Create the Cluster on Existing Nodes -The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) with some Windows-specific requirements. +The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster](use-existing-nodes.md) with some Windows-specific requirements. 1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. 1. Click **From existing nodes (Custom)**. @@ -157,7 +157,7 @@ The instructions for creating a Windows cluster on existing nodes are very simil 1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. 1. Click **Next**.
-> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. +> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. # 3. Add Nodes to the Cluster @@ -171,7 +171,7 @@ The first node in your cluster should be a Linux host has both the **Control Pla 1. In the **Node Operating System** section, click **Linux**. 1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. -1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{}}/rancher/v2.0-v2.4/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +1. 
Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) 1. Copy the command displayed on the screen to your clipboard. 1. SSH into your Linux host and run the command that you copied to your clipboard. 1. When you are finished provisioning your Linux node(s), select **Done**. @@ -234,9 +234,9 @@ You can add Windows hosts to the cluster by editing the cluster and choosing the After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation.
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. # Configuration for Storage Classes in Azure -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. For details, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass) \ No newline at end of file +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. 
For details, refer to [this section.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md new file mode 100644 index 00000000000..bf76a7e591c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md @@ -0,0 +1,18 @@ +--- +title: User Settings +weight: 23 +aliases: + - /rancher/v2.0-v2.4/en/tasks/user-settings/ +--- + +Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. + +![User Settings Menu](/img/user-settings.png) + +The available user settings are: + +- [API & Keys](../reference-guides/user-settings/api-keys.md): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. +- [Cloud Credentials](../reference-guides/user-settings/manage-cloud-credentials.md): Manage cloud credentials [used by node templates](use-new-nodes-in-an-infra-provider.md#node-templates) to [provision nodes for clusters](launch-kubernetes-with-rancher.md). Note: Available as of v2.2.0. +- [Node Templates](../reference-guides/user-settings/manage-node-templates.md): Manage templates [used by Rancher to provision nodes for clusters](launch-kubernetes-with-rancher.md). +- [Preferences](../reference-guides/user-settings/user-preferences.md): Sets superficial preferences for the Rancher UI. +- Log Out: Ends your user session. 
diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md similarity index 72% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md index 963a18ac459..92a5c79f91b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md @@ -27,7 +27,7 @@ The vSphere node templates have been updated, allowing you to bring cloud operat _Available as of v2.3.0_ -One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. +One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,](use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. > **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. 
When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. @@ -37,7 +37,7 @@ _Available as of v2.3.3_ Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. -For the fields to be populated, your setup needs to fulfill the [prerequisites.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) +For the fields to be populated, your setup needs to fulfill the [prerequisites.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md#prerequisites) ### More Supported Operating Systems @@ -53,14 +53,14 @@ In this YouTube video, we demonstrate how to set up a node template with the new # Creating a vSphere Cluster -In [this section,](./provisioning-vsphere-clusters) you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. +In [this section,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md) you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere.
# Provisioning Storage -For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) +For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) # Enabling the vSphere Cloud Provider When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices. 
-For details, refer to the section on [enabling the vSphere cloud provider.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) \ No newline at end of file +For details, refer to the section on [enabling the vSphere cloud provider.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/workloads.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md similarity index 92% rename from versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/workloads.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md index 412ffe112a5..b1916615b1f 100644 --- a/versioned_docs/version-2.0-2.4/k8s-in-rancher/workloads/workloads.md +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md @@ -72,9 +72,9 @@ There are several types of services available in Rancher. The descriptions below This section of the documentation contains instructions for deploying workloads and using workload options. 
-- [Deploy Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/) -- [Upgrade Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/upgrade-workloads/) -- [Rollback Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/rollback-workloads/) +- [Deploy Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md) +- [Upgrade Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) +- [Rollback Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) ## Related Links diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/deployment.md b/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/deployment.md deleted file mode 100644 index f7d4da476aa..00000000000 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/deployment/deployment.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Deploying Rancher Server -weight: 100 ---- - -Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. - -- [DigitalOcean](./digital-ocean-qs) (uses Terraform) -- [AWS](./amazon-aws-qs) (uses Terraform) -- [Azure](./microsoft-azure-qs) (uses Terraform) -- [GCP](./google-gcp-qs) (uses Terraform) -- [Vagrant](./quickstart-vagrant) - -If you prefer, the following guide will take you through the same process in individual steps. Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. 
- -- [Manual Install](./quickstart-manual-setup) diff --git a/versioned_docs/version-2.0-2.4/quick-start-guide/workload/workload.md b/versioned_docs/version-2.0-2.4/quick-start-guide/workload/workload.md deleted file mode 100644 index a3be7493b6c..00000000000 --- a/versioned_docs/version-2.0-2.4/quick-start-guide/workload/workload.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Deploying Workloads -weight: 200 ---- - -These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - -- [Workload with Ingress](./quickstart-deploy-workload-ingress) -- [Workload with NodePort](./quickstart-deploy-workload-nodeport) diff --git a/versioned_docs/version-2.0-2.4/reference-guides.md b/versioned_docs/version-2.0-2.4/reference-guides.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/api/api-tokens/api-tokens.md b/versioned_docs/version-2.0-2.4/reference-guides/about-the-api/api-tokens.md similarity index 100% rename from versioned_docs/version-2.0-2.4/api/api-tokens/api-tokens.md rename to versioned_docs/version-2.0-2.4/reference-guides/about-the-api/api-tokens.md diff --git a/versioned_docs/version-2.0-2.4/best-practices/containers/containers.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/containers.md similarity index 100% rename from versioned_docs/version-2.0-2.4/best-practices/containers/containers.md rename to versioned_docs/version-2.0-2.4/reference-guides/best-practices/containers.md diff --git a/versioned_docs/version-2.0-2.4/best-practices/deployment-strategies/deployment-strategies.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-strategies.md similarity index 93% rename from versioned_docs/version-2.0-2.4/best-practices/deployment-strategies/deployment-strategies.md rename to 
versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-strategies.md index 6a11761bfc7..950fc01a4f1 100644 --- a/versioned_docs/version-2.0-2.4/best-practices/deployment-strategies/deployment-strategies.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-strategies.md @@ -17,7 +17,7 @@ There are two recommended deployment strategies. Each one has its own pros and c In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. -{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} +![](/img/bpg/hub-and-spoke.png) ### Pros @@ -34,7 +34,7 @@ In this deployment scenario, there is a single Rancher control plane managing Ku --- In the regional deployment model a control plane is deployed in close proximity to the compute nodes. -{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} +![](/img/bpg/regional.png) ### Pros diff --git a/versioned_docs/version-2.0-2.4/best-practices/deployment-types/deployment-types.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-types.md similarity index 79% rename from versioned_docs/version-2.0-2.4/best-practices/deployment-types/deployment-types.md rename to versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-types.md index 34f1f0c173a..c7a217854c5 100644 --- a/versioned_docs/version-2.0-2.4/best-practices/deployment-types/deployment-types.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-types.md @@ -17,7 +17,7 @@ Don't run other workloads or microservices in the Kubernetes cluster that Ranche ### Don't Run Rancher on a Hosted Kubernetes Environment When the Rancher server is installed on a Kubernetes cluster, it should not be run in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or 
Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. -It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE]({{}}/rke/latest/en/etcd-snapshots/) or [Rancher]({{}}/rancher/v2.0-v2.4/en/backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. +It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE](https://rancher.com/docs/rke/latest/en/etcd-snapshots/) or [Rancher](backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. ### Make sure nodes are configured correctly for Kubernetes ### It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. 
More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md) @@ -32,11 +32,11 @@ For best performance, run all three of your nodes in the same geographic datacen It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. ### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. +The Rancher server's Kubernetes cluster should run within the [system and hardware requirements](../../pages-for-subheaders/installation-requirements.md) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. 
-After you [enable monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. +After you [enable monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) in the cluster, you can set up [a notification channel](../../explanations/integrations-in-rancher/notifiers.md) and [cluster alerts](cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. diff --git a/versioned_docs/version-2.0-2.4/best-practices/management/management.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/management.md similarity index 93% rename from versioned_docs/version-2.0-2.4/best-practices/management/management.md rename to versioned_docs/version-2.0-2.4/reference-guides/best-practices/management.md index 5e23755f729..1f4d04112c0 100644 --- a/versioned_docs/version-2.0-2.4/best-practices/management/management.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/management.md @@ -23,7 +23,7 @@ Rancher allows you to set up numerous combinations of configurations. Some confi These tips can help you solve problems before they happen. ### Run Rancher on a Supported OS and Supported Docker Version -Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) should be used for running Rancher, along with a supported version of Docker. 
These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. +Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation](../../pages-for-subheaders/installation-requirements.md) should be used for running Rancher, along with a supported version of Docker. These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. ### Upgrade Your Kubernetes Version Keep your Kubernetes cluster up to date with a recent and supported version. Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if a security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). @@ -42,14 +42,14 @@ Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2 All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. ### Renew Certificates Before they Expire -Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) to track certificate expiration. +Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. 
If you have multiple certificates to track, consider using [monitoring and alerting mechanisms](../rancher-cluster-tools.md) to track certificate expiration. Rancher-provisioned Kubernetes clusters will use certificates that expire in one year. Clusters provisioned by other means may have a longer or shorter expiration. -Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/). +Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface](../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md). ### Enable Recurring Snapshots for Backing up and Restoring the Cluster -Make sure etcd recurring snapshots are enabled. Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. For details about configuring snapshots, refer to the [RKE documentation]({{}}/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups]({{}}/rancher/v2.0-v2.4/en/backups/). +Make sure etcd recurring snapshots are enabled. Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. For details about configuring snapshots, refer to the [RKE documentation](https://rancher.com/docs/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups](../../pages-for-subheaders/backup-restore-and-disaster-recovery.md). ### Provision Clusters with Rancher When possible, use Rancher to provision your Kubernetes cluster rather than importing a cluster. This will ensure the best compatibility and supportability. @@ -91,13 +91,13 @@ Provision 3 or 5 etcd nodes. Etcd requires a quorum to determine a leader by the Provision three or more control plane nodes. 
Some control plane components, such as the `kube-apiserver`, run in [active-active](https://www.jscape.com/blog/active-active-vs-active-passive-high-availability-cluster) mode and will give you more scalability. Other components such as kube-scheduler and kube-controller run in active-passive mode (leader elect) and give you more fault tolerance. ### Monitor Your Cluster -Closely monitor and scale your nodes as needed. You should [enable cluster monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. +Closely monitor and scale your nodes as needed. You should [enable cluster monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. # Tips for Security Below are some basic tips for increasing security in Rancher. For more detailed information about securing your cluster, you can refer to these resources: -- Rancher's [security documentation and Kubernetes cluster hardening guide]({{}}/rancher/v2.0-v2.4/en/security/) +- Rancher's [security documentation and Kubernetes cluster hardening guide](../../pages-for-subheaders/rancher-security.md) - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) ### Update Rancher with Security Patches diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md 
b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ec2-node-template-config.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md similarity index 58% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ec2-node-template-config.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md index 2fefb8bff57..bcc3023047c 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ec2-node-template-config.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md @@ -17,7 +17,7 @@ In the **Region** field, select the same region that you used when creating your ### Cloud Credentials -Your AWS account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) +Your AWS account access information, stored in a [cloud credential.](../../../user-settings/manage-cloud-credentials.md) See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. @@ -27,9 +27,9 @@ See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs. 
See our three example JSON policies: -- [Example IAM Policy]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. +- [Example IAM Policy](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy) +- [Example IAM Policy with PassRole](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) +- [Example IAM Policy to allow encrypted EBS volumes](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to a user. ### Authenticate & Configure Nodes @@ -39,13 +39,13 @@ Choose an availability zone and network settings for your cluster. Choose the default security group or configure a security group.
-Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. +Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. ### Instance Options Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. +If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. ### Engine Options @@ -66,9 +66,9 @@ See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs. 
See our three example JSON policies: -- [Example IAM Policy]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. +- [Example IAM Policy](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy) +- [Example IAM Policy with PassRole](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) +- [Example IAM Policy to allow encrypted EBS volumes](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to a user. ### Zone and Network @@ -76,7 +76,7 @@ See our three example JSON policies: ### Security Groups -**Security Groups** creates or configures the Security Groups applied to your nodes.
Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. +**Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. ### Instance @@ -88,7 +88,7 @@ Make sure you configure the correct **SSH User** for the configured AMI. ### IAM Instance Profile Name -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. +If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. 
### Docker Daemon diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/azure-node-template-config.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/azure-node-template-config.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/do-node-template-config.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md similarity index 100% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/do-node-template-config.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/prior-to-2.0.4.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md similarity index 92% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/prior-to-2.0.4.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md index f66a4fa794f..e46da1f8466 100644 --- 
a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/prior-to-2.0.4.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md @@ -50,9 +50,9 @@ Ensure that the OS ISO URL contains the URL of the VMware ISO release for Ranche | CPUs | * | Number of vCPUS to assign to VMs. | | Memory | * | Amount of memory to assign to VMs. | | Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| | OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. 
Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | # Disk UUIDs @@ -65,7 +65,7 @@ To enable disk UUIDs for all VMs created for a cluster, 3. Under **Instance Options** click on **Add Parameter**. 4. Enter `disk.enableUUID` as key with a value of **TRUE**. - {{< img "/img/rke/vsphere-nodedriver-enable-uuid.png" "vsphere-nodedriver-enable-uuid" >}} + ![](/img/rke/vsphere-nodedriver-enable-uuid.png) 5. Click **Create** or **Save**. diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/v2.0.4.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md similarity index 92% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/v2.0.4.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md index 658c575eaad..83f7c9b58bf 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/v2.0.4.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md @@ -46,9 +46,9 @@ Ensure that the OS ISO URL contains the URL of the VMware ISO release for Ranche | CPUs | * | Number of vCPUS to assign to VMs. | | Memory | * | Amount of memory to assign to VMs. | | Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. 
This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| | OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). 
| # Node Tags and Custom Attributes diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/v2.2.0.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/v2.2.0.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md index feab925f580..a5352740e85 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/v2.2.0.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md @@ -15,7 +15,7 @@ aliases: | Parameter | Required | Description | |:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) | +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../../user-settings/manage-cloud-credentials.md) | Your cloud credential has these fields: @@ -50,9 +50,9 @@ Ensure that the OS ISO URL contains the URL of the VMware ISO release for Ranche | CPUs | * | Number of vCPUS to assign to VMs. | | Memory | * | Amount of memory to assign to VMs. | | Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. 
This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| | OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). 
| # Node Tags and Custom Attributes diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/v2.3.0.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md similarity index 90% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/v2.3.0.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md index 829e7edea51..c664e2f2465 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/v2.3.0.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md @@ -15,7 +15,7 @@ aliases: | Parameter | Required | Description | |:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) | +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../../user-settings/manage-cloud-credentials.md) | Your cloud credential has these fields: @@ -56,9 +56,9 @@ Ensure that the OS ISO URL contains the URL of the VMware ISO release for Ranche | CPUs | * | Number of vCPUS to assign to VMs. | | Memory | * | Amount of memory to assign to VMs. | | Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. 
This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| | OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). 
| # Node Tags and Custom Attributes diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/v2.3.3.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md similarity index 96% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/v2.3.3.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md index 9b4c5390378..a9d55fe1e5b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/v2.3.3.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md @@ -16,7 +16,7 @@ aliases: | Parameter | Required | Description | |:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) | +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../../user-settings/manage-cloud-credentials.md) | Your cloud credential has these fields: @@ -52,7 +52,7 @@ In the **Instance Options** section, configure the number of vCPUs, memory, and | Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. 
For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | | Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | | Networks | | Name(s) of the network to attach the VM to. | -| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | +| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). 
| ### About VM Creation Methods diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/options/options.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md similarity index 67% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/options/options.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md index 7ede5064def..5b64ad1eead 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/options/options.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -3,7 +3,7 @@ title: RKE Cluster Configuration Reference weight: 2250 --- -When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) as the Kubernetes distribution. +When Rancher installs Kubernetes, it uses [RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) as the Kubernetes distribution. This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. @@ -12,7 +12,7 @@ You can configure the Kubernetes options one of two ways: - [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. - [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. 
-In Rancher v2.0.0-v2.2.x, the RKE cluster config file in Rancher is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the section about the [cluster config file.](#cluster-config-file) +In Rancher v2.0.0-v2.2.x, the RKE cluster config file in Rancher is identical to the [cluster config file for the Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the section about the [cluster config file.](#cluster-config-file) This section is a cluster configuration reference, covering the following topics: @@ -39,7 +39,7 @@ This section is a cluster configuration reference, covering the following topics # Rancher UI Options -When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. +When creating a cluster using one of the options described in [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), you can configure basic Kubernetes options using the **Cluster Options** section. ### Kubernetes Version @@ -47,7 +47,7 @@ The version of Kubernetes installed on your cluster nodes. 
Rancher packages its ### Network Provider -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.0-v2.4/en/faq/networking/cni-providers/). +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ](../../../faq/container-network-interface-providers.md). >**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. @@ -60,9 +60,9 @@ Out of the box, Rancher is compatible with the following network providers: **Notes on Canal:** -In v2.0.0 - v2.0.4 and v2.0.6, this was the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/). +In v2.0.0 - v2.0.4 and v2.0.6, the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects](k8s-in-rancher/projects-and-namespaces/). -As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/). 
+As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects](k8s-in-rancher/projects-and-namespaces/). >**Attention Rancher v2.0.0 - v2.0.6 Users** > @@ -75,13 +75,13 @@ In v2.0.5, this was the default option, which did not prevent any network isolat **Notes on Weave:** -When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). +When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File](rke1-cluster-configuration.md#cluster-config-file) and the [Weave Network Plug-in Options](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). ### Kubernetes Cloud Providers -You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. +You can configure a [Kubernetes cloud provider](cluster-provisioning/rke-clusters/options/cloud-providers). 
If you want to use [volumes and storage](k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. ->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. +>**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: @@ -91,16 +91,16 @@ _Available as of v2.2.0_ The cluster-level private registry configuration is only used for provisioning clusters. -There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.0-v2.4/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. 
+There are two main ways to set up private registries in Rancher: by setting up the [global default registry](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. -The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. +The private registry configuration option tells Rancher where to pull the [system images](https://rancher.com/docs/rke/latest/en/config-options/system-images/) or [addon images](https://rancher.com/docs/rke/latest/en/config-options/add-ons/) that will be used in your cluster. - **System images** are components needed to maintain the Kubernetes cluster. - **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. -See the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. 
+See the [RKE documentation on private registries](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. ### Authorized Cluster Endpoint @@ -108,17 +108,17 @@ _Available as of v2.2.0_ Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. -> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are imported into Rancher; it is available only on Rancher-launched Kubernetes clusters. +> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE](rke1-cluster-configuration.md#authorized-cluster-endpoint). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are imported into Rancher; it is available only on Rancher-launched Kubernetes clusters. This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. 
-For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) ### Node Pools -For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) +For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) # Advanced Options @@ -126,7 +126,7 @@ The following options are available when you create clusters in the Rancher UI. ### NGINX Ingress -Option to enable or disable the [NGINX ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). +Option to enable or disable the [NGINX ingress controller](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/). ### Node Port Range @@ -134,15 +134,15 @@ Option to change the range of ports that can be used for [NodePort services](htt ### Metrics Server Monitoring -Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). 
+Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). ### Pod Security Policy Support -Option to enable and select a default [Pod Security Policy]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. +Option to enable and select a default [Pod Security Policy](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). You must have an existing Pod Security Policy configured before you can use this option. ### Docker Version on Nodes -Option to require [a supported Docker version]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. +Option to require [a supported Docker version](../../../pages-for-subheaders/installation-requirements.md) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. ### Docker Root Directory @@ -150,24 +150,24 @@ If the nodes you are adding to the cluster have Docker configured with a non-def ### Recurring etcd Snapshots -Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). +Option to enable or disable [recurring etcd snapshots](https://rancher.com/docs/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). # Cluster Config File -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. 
+Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available](https://rancher.com/docs/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. >**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. - To read from an existing RKE file, click **Read from a file**. -![image]({{}}/img/rancher/cluster-options-yaml.png) +![image](/img/cluster-options-yaml.png) The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. ### Config File Structure in Rancher v2.3.0+ -RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,](https://rancher.com/docs/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. 
Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below.
Example Cluster Config File for Rancher v2.3.0+ @@ -351,7 +351,7 @@ ssh_agent_auth: false ### Default DNS provider -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. +The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider](https://rancher.com/docs/rke/latest/en/config-options/add-ons/dns/) for more information on how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. | Rancher version | Kubernetes version | Default DNS provider | |-------------|--------------------|----------------------| @@ -371,7 +371,7 @@ See [Docker Root Directory](#docker-root-directory). ### enable_cluster_monitoring -Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/). +Option to enable or disable [Cluster Monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/). ### enable_network_policy @@ -394,11 +394,11 @@ local_cluster_auth_endpoint: _Available as of v2.2.4_ -You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. +You can add a custom network plug-in by using the [user-defined add-on functionality](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. 
There are two ways that you can specify an add-on: -- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) -- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) +- [In-line Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) -For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/custom-nodes/agent-options/agent-options.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md similarity index 93% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/custom-nodes/agent-options/agent-options.md rename to versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md index 716fdbaaafd..0675c6ea9f5 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/rke-clusters/custom-nodes/agent-options/agent-options.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md @@ -6,9 +6,9 @@ aliases: - /rancher/v2.0-v2.4/en/cluster-provisioning/custom-clusters/agent-options --- -Rancher 
deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) and add the options to the generated `docker run` command when adding a node. +Rancher deploys an agent on each node to communicate with the node. This page describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes](../../../../pages-for-subheaders/use-existing-nodes.md) and add the options to the generated `docker run` command when adding a node. -For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#3-node-agents) +For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.](../../../../pages-for-subheaders/rancher-manager-architecture.md#3-node-agents) ## General options diff --git a/versioned_docs/version-2.0-2.4/admin-settings/authentication/openldap/openldap-config/openldap-config.md b/versioned_docs/version-2.0-2.4/reference-guides/configure-openldap/openldap-config-reference.md similarity index 90% rename from versioned_docs/version-2.0-2.4/admin-settings/authentication/openldap/openldap-config/openldap-config.md rename to versioned_docs/version-2.0-2.4/reference-guides/configure-openldap/openldap-config-reference.md index 2e28c77e456..9efc94d203d 100644 --- a/versioned_docs/version-2.0-2.4/admin-settings/authentication/openldap/openldap-config/openldap-config.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/configure-openldap/openldap-config-reference.md @@ -7,7 +7,7 @@ This section is intended to be used as a reference when setting up an OpenLDAP a For further details on configuring OpenLDAP, refer to the [official
documentation.](https://www.openldap.org/doc/) -> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). +> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users](../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). - [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) - [OpenLDAP server configuration](#openldap-server-configuration) @@ -30,7 +30,7 @@ You will need to enter the address, port, and protocol to connect to your OpenLD > > If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. -If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. 
+If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation.
OpenLDAP Server Parameters
@@ -51,7 +51,7 @@ If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. -If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. +If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. ### User Schema Configuration diff --git a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/eks/eks.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/amazon-eks-permissions.md similarity index 98% rename from versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/eks/eks.md rename to versioned_docs/version-2.0-2.4/reference-guides/installation-references/amazon-eks-permissions.md index 1e7fba46ed2..c28cf2123fe 100644 --- a/versioned_docs/version-2.0-2.4/cluster-provisioning/hosted-kubernetes-clusters/eks/eks.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/amazon-eks-permissions.md @@ -54,7 +54,7 @@ The figure below illustrates the high-level architecture of Rancher 2.x. The fig
Managing Kubernetes Clusters through Rancher's Authentication Proxy
-![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) +![Architecture](/img/rancher-architecture-rancher-api-server.svg) # Create the EKS Cluster @@ -221,8 +221,8 @@ Documented here is a minimum set of permissions necessary to use all functionali Resource | Description ---------|------------ -Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#service-role-permissions). -VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#vpc-permissions). +Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions](amazon-eks-permissions.md#service-role-permissions). +VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions](amazon-eks-permissions.md#vpc-permissions). Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher. 
diff --git a/versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/chart-options/chart-options.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/helm-chart-options.md similarity index 92% rename from versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/chart-options/chart-options.md rename to versioned_docs/version-2.0-2.4/reference-guides/installation-references/helm-chart-options.md index f027000a6cb..16663dd3c15 100644 --- a/versioned_docs/version-2.0-2.4/installation/install-rancher-on-k8s/chart-options/chart-options.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/helm-chart-options.md @@ -10,9 +10,9 @@ aliases: This page is a configuration reference for the Rancher Helm chart. -For help choosing a Helm chart version, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/) +For help choosing a Helm chart version, refer to [this page.](../../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md) -For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/feature-flags/) +For information on enabling experimental features, refer to [this page.](../../pages-for-subheaders/enable-experimental-features.md) - [Common Options](#common-options) - [Advanced Options](#advanced-options) @@ -46,7 +46,7 @@ For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) level. 0 is off. 
[0-3] | +| `auditLog.level` | 0 | `int` - set the [API Audit Log](installation/api-auditing) level. 0 is off. [0-3] | | `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | | `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | @@ -73,15 +73,15 @@ For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing/). +Enabling the [API Audit Log](installation/api-auditing/). -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. +You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools](cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. ```plain --set auditLog.level=1 ``` -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the Rancher server cluster or System Project. +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. 
You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools](cluster-admin/tools/logging/) for the Rancher server cluster or System Project. Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. @@ -103,7 +103,7 @@ To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` a --set 'extraEnv[0].value=1.0' ``` -See [TLS settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/tls-settings) for more information and options. +See [TLS settings](admin-settings/tls-settings) for more information and options. ### Import `local` Cluster @@ -164,8 +164,8 @@ kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca- For details on installing Rancher with a private registry, see: -- [Air Gap: Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) +- [Air Gap: Docker Install](installation/air-gap-single-node/) +- [Air Gap: Kubernetes Install](installation/air-gap-high-availability/) # External TLS Termination @@ -173,7 +173,7 @@ We recommend configuring your load balancer as a Layer 4 balancer, forwarding pl You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. 
Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) to add the CA cert for Rancher. +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](installation/resources/encryption/tls-secrets/) to add the CA cert for Rancher. Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. diff --git a/versioned_docs/version-2.0-2.4/installation/resources/tls-settings/tls-settings.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/tls-settings.md similarity index 88% rename from versioned_docs/version-2.0-2.4/installation/resources/tls-settings/tls-settings.md rename to versioned_docs/version-2.0-2.4/reference-guides/installation-references/tls-settings.md index 3cd06647f1e..7b076dfc821 100644 --- a/versioned_docs/version-2.0-2.4/installation/resources/tls-settings/tls-settings.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/tls-settings.md @@ -13,9 +13,9 @@ In Rancher v2.1.7, the default TLS configuration changed to only accept TLS 1.2 The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. 
-- [TLS settings in Docker options]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#tls-settings) +- [TLS settings in Docker options](../single-node-rancher-in-docker/advanced-options.md#tls-settings) -- [TLS settings in Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#tls-settings) +- [TLS settings in Helm chart options](helm-chart-options.md#tls-settings) # TLS Environment Variables diff --git a/versioned_docs/version-2.0-2.4/overview/concepts/concepts.md b/versioned_docs/version-2.0-2.4/reference-guides/kubernetes-concepts.md similarity index 98% rename from versioned_docs/version-2.0-2.4/overview/concepts/concepts.md rename to versioned_docs/version-2.0-2.4/reference-guides/kubernetes-concepts.md index c637928995d..c6813e7ba08 100644 --- a/versioned_docs/version-2.0-2.4/overview/concepts/concepts.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/kubernetes-concepts.md @@ -61,7 +61,7 @@ Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs - **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. - **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/). +Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads](../pages-for-subheaders/workloads-and-pods.md). 
# About Helm diff --git a/versioned_docs/version-2.0-2.4/pipelines/concepts/concepts.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/concepts.md similarity index 100% rename from versioned_docs/version-2.0-2.4/pipelines/concepts/concepts.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/concepts.md diff --git a/versioned_docs/version-2.0-2.4/pipelines/storage/storage.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/configure-persistent-data.md similarity index 93% rename from versioned_docs/version-2.0-2.4/pipelines/storage/storage.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/configure-persistent-data.md index adaaa81ef17..8dcbcef9c0f 100644 --- a/versioned_docs/version-2.0-2.4/pipelines/storage/storage.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/configure-persistent-data.md @@ -10,11 +10,11 @@ import TabItem from '@theme/TabItem'; The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. -This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/how-storage-works/) +This section assumes that you understand how persistent storage works in Kubernetes. 
For more information, refer to the section on [how storage works.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) >**Prerequisites (for both parts A and B):** > ->[Persistent volumes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) must be available for the cluster. +>[Persistent volumes](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) must be available for the cluster. ### A. Configuring Persistent Data for Docker Registry diff --git a/versioned_docs/version-2.0-2.4/pipelines/example-repos/example-repos.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-repositories.md similarity index 90% rename from versioned_docs/version-2.0-2.4/pipelines/example-repos/example-repos.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-repositories.md index 4c0393fecdf..8321430b7a8 100644 --- a/versioned_docs/version-2.0-2.4/pipelines/example-repos/example-repos.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-repositories.md @@ -12,7 +12,7 @@ Rancher ships with several example repositories that you can use to familiarize - Maven - php -> **Note:** The example repositories are only available if you have not [configured a version control provider]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines). +> **Note:** The example repositories are only available if you have not [configured a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md). To start using these example repositories, @@ -74,4 +74,4 @@ After enabling an example repository, run the pipeline to see how it works. ### What's Next? -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines), enable a repository and finally configure your pipeline. 
+For detailed information about setting up your own pipeline for your repository, [configure a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md), enable a repository and finally configure your pipeline. diff --git a/versioned_docs/version-2.0-2.4/pipelines/example/example.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-yaml.md similarity index 90% rename from versioned_docs/version-2.0-2.4/pipelines/example/example.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-yaml.md index e94e24171b4..02ba54ce73d 100644 --- a/versioned_docs/version-2.0-2.4/pipelines/example/example.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-yaml.md @@ -8,7 +8,7 @@ aliases: Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. -In the [pipeline configuration reference]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. +In the [pipeline configuration reference](k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. Below is a full example `rancher-pipeline.yml` for those who want to jump right in. 
diff --git a/versioned_docs/version-2.0-2.4/pipelines/config/config.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/pipeline-configuration.md similarity index 94% rename from versioned_docs/version-2.0-2.4/pipelines/config/config.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/pipeline-configuration.md index bef328823fa..65436b10d07 100644 --- a/versioned_docs/version-2.0-2.4/pipelines/config/config.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/pipeline-configuration.md @@ -158,7 +158,7 @@ stages: _Available as of v2.2.0_ -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository]({{}}/rancher/v2.0-v2.4/en/catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. +The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository](catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. ### Configuring Publishing a Catalog Template by UI @@ -309,7 +309,7 @@ timeout: 30 # Notifications -You can enable notifications to any [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) based on the build status of a pipeline. 
Before enabling notifications, Rancher recommends [setting up notifiers]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers/) so it will be easy to add recipients immediately. +You can enable notifications to any [notifiers](../../explanations/integrations-in-rancher/notifiers.md) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers](monitoring-alerting/legacy/notifiers/) so it will be easy to add recipients immediately. ### Configuring Notifications by UI @@ -319,7 +319,7 @@ _Available as of v2.2.0_ 1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. -1. If you don't have any existing [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers/) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. +1. If you don't have any existing [notifiers](../../explanations/integrations-in-rancher/notifiers.md), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions](monitoring-alerting/legacy/notifiers/) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. 
@@ -522,7 +522,7 @@ stages: # Secrets -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/). +If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets](../../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md). ### Prerequisite Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. @@ -653,8 +653,8 @@ If you want to use a version control provider with a certificate from a custom/i The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. 
-For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/storage) +For details on setting up persistent storage for pipelines, refer to [this page.](k8s-in-rancher/pipelines/storage) # Example rancher-pipeline.yml -An example pipeline configuration file is on [this page.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example) +An example pipeline configuration file is on [this page.](k8s-in-rancher/pipelines/example) diff --git a/versioned_docs/version-2.0-2.4/pipelines/docs-for-v2.0.x/docs-for-v2.0.x.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/v2.0.x.md similarity index 97% rename from versioned_docs/version-2.0-2.4/pipelines/docs-for-v2.0.x/docs-for-v2.0.x.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/v2.0.x.md index e4c584e74bf..18001af57cf 100644 --- a/versioned_docs/version-2.0-2.4/pipelines/docs-for-v2.0.x/docs-for-v2.0.x.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/v2.0.x.md @@ -8,7 +8,7 @@ aliases: - /rancher/v2.x/en/pipelines/docs-for-v2.0.x/ --- ->**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/). +>**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later](k8s-in-rancher/pipelines/). 
diff --git a/versioned_docs/version-2.0-2.4/cluster-admin/tools/tools.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-cluster-tools.md similarity index 72% rename from versioned_docs/version-2.0-2.4/cluster-admin/tools/tools.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-cluster-tools.md index 8318bf11b38..c45b2b9638b 100644 --- a/versioned_docs/version-2.0-2.4/cluster-admin/tools/tools.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-cluster-tools.md @@ -2,7 +2,7 @@ title: Tools for Logging, Monitoring, and More weight: 2033 aliases: - - /rancher/v2.0-v2.4/en/tools/notifiers-and-alerts/ + - /rancher/v2.0-v2.4/en/tools/notifiers-and-alerts/ --- Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: @@ -32,40 +32,40 @@ Logging is helpful because it allows you to: Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. -Refer to the logging documentation [here.](./cluster-logging) +Refer to the logging documentation [here.](../pages-for-subheaders/cluster-logging.md) # Monitoring Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. -For details, refer to [Monitoring.](./cluster-monitoring) +For details, refer to [Monitoring.](../pages-for-subheaders/cluster-monitoring.md) # Alerts After monitoring is enabled, you can set up alerts and notifiers that provide the mechanism to receive them. Alerts are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. 
-For details, refer to [Alerts.](./cluster-alerts) +For details, refer to [Alerts.](../pages-for-subheaders/cluster-alerts.md) # Notifiers Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. -For details, refer to [Notifiers.](./notifiers) +For details, refer to [Notifiers.](../explanations/integrations-in-rancher/notifiers.md) # Istio _Available as of v2.3_ -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. +[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. -Refer to the Istio documentation [here.](./istio) +Refer to the Istio documentation [here.](../pages-for-subheaders/istio.md) # OPA Gatekeeper -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.](./opa-gatekeeper) +[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.](../explanations/integrations-in-rancher/opa-gatekeeper.md) # CIS Scans Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. 
-Refer to the CIS scan documentation [here.](./cis-scans) \ No newline at end of file +Refer to the CIS scan documentation [here.](../pages-for-subheaders/cis-scans.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/overview/architecture-recommendations/architecture-recommendations.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/architecture-recommendations.md similarity index 84% rename from versioned_docs/version-2.0-2.4/overview/architecture-recommendations/architecture-recommendations.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/architecture-recommendations.md index dade3593e01..010866535e2 100644 --- a/versioned_docs/version-2.0-2.4/overview/architecture-recommendations/architecture-recommendations.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/architecture-recommendations.md @@ -22,7 +22,7 @@ If you have a Docker installation of Rancher, the node running the Rancher serve In Kubernetes installations of Rancher, the Rancher server cluster should also be separate from the user clusters. -![Separation of Rancher Server from User Clusters]({{}}/img/rancher/rancher-architecture-separation-of-rancher-server.svg) +![Separation of Rancher Server from User Clusters](/img/rancher-architecture-separation-of-rancher-server.svg) # Why HA is Better for Rancher in Production @@ -30,7 +30,7 @@ We recommend installing the Rancher server on a high-availability Kubernetes clu We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. -As of v2.4, Rancher needs to be installed on either a high-availability [RKE (Rancher Kubernetes Engine)]({{}}/rke/latest/en/) Kubernetes cluster, or a high-availability [K3s (Lightweight Kubernetes)]({{}}/k3s/latest/en/) Kubernetes cluster. 
Both RKE and K3s are fully certified Kubernetes distributions. +As of v2.4, Rancher needs to be installed on either a high-availability [RKE (Rancher Kubernetes Engine)](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster, or a high-availability [K3s (Lightweight Kubernetes)](https://rancher.com/docs/k3s/latest/en/) Kubernetes cluster. Both RKE and K3s are fully certified Kubernetes distributions. Rancher versions before v2.4 need to be installed on an RKE cluster. @@ -41,7 +41,7 @@ If you are installing Rancher v2.4 for the first time, we recommend installing i The option to install Rancher on a K3s cluster is a feature introduced in Rancher v2.4. K3s is easy to install, with half the memory of Kubernetes, all in a binary less than 100 MB.
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) +![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server](/img/k3s-server-storage.svg) ### RKE Kubernetes Cluster Installations @@ -50,7 +50,7 @@ If you are installing Rancher before v2.4, you will need to install Rancher on a In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails.
Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) +![Architecture of an RKE Kubernetes cluster running the Rancher management server](/img/rke-server-storage.svg) # Recommended Load Balancer Configuration for Kubernetes Installations @@ -62,13 +62,13 @@ We recommend the following configurations for the load balancer and Ingress cont * The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment.
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
-![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) +![Rancher HA](/img/ha/rancher2ha.svg) # Environment for Kubernetes Installations It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. -For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. +For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) for running your workloads. It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. @@ -94,7 +94,7 @@ Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. -![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters]({{}}/img/rancher/rancher-architecture-node-roles.svg) +![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters](/img/rancher-architecture-node-roles.svg) RKE only requires at least one node with each role and does not require nodes to be restricted to one role. 
However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. @@ -112,10 +112,10 @@ With that said, it is safe to use all three roles on three nodes when setting up Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. -For more best practices for downstream clusters, refer to the [production checklist]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production) or our [best practices guide.]({{}}/rancher/v2.0-v2.4/en/best-practices/) +For more best practices for downstream clusters, refer to the [production checklist](../../pages-for-subheaders/checklist-for-production-ready-clusters.md) or our [best practices guide.](../../pages-for-subheaders/best-practices.md) # Architecture for an Authorized Cluster Endpoint -If you are using an [authorized cluster endpoint,]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. +If you are using an [authorized cluster endpoint,](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. -If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/kubeconfig/) and [API keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/#creating-an-api-key) for more information. 
\ No newline at end of file +If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files](k8s-in-rancher/kubeconfig/) and [API keys](../user-settings/api-keys.md#creating-an-api-key) for more information. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md new file mode 100644 index 00000000000..fa42a3bae89 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/project-admin/tools/project-alerts/project-alerts.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-alerts.md similarity index 85% rename from versioned_docs/version-2.0-2.4/project-admin/tools/project-alerts/project-alerts.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-alerts.md index e3710de9d07..3423ccdc240 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/tools/project-alerts/project-alerts.md +++ 
b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-alerts.md @@ -10,11 +10,11 @@ aliases: To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. +Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) and [project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) of events they need to address. -Before you can receive alerts, one or more [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) must be configured at the cluster level. +Before you can receive alerts, one or more [notifier](../../explanations/integrations-in-rancher/notifiers.md) must be configured at the cluster level. 
-Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. +Only [administrators](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can manage project alerts. This section covers the following topics: @@ -31,7 +31,7 @@ This section covers the following topics: # Alerts Scope -The scope for alerts can be set at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) or project level. +The scope for alerts can be set at either the [cluster level](cluster-admin/tools/alerts/) or project level. At the project level, Rancher monitors specific deployments and sends alerts for: @@ -42,14 +42,14 @@ At the project level, Rancher monitors specific deployments and sends alerts for # Default Project-level Alerts -When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) for them is configured at the cluster level. +When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier](../../explanations/integrations-in-rancher/notifiers.md) for them is configured at the cluster level. 
| Alert | Explanation | |-------|-------------| | Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | | Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | -For information on other default alerts, refer to the section on [cluster-level alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/default-alerts) +For information on other default alerts, refer to the section on [cluster-level alerts.](cluster-admin/tools/alerts/default-alerts) # Adding Project Alerts @@ -65,7 +65,7 @@ For information on other default alerts, refer to the section on [cluster-level 1. Continue adding more alert rules to the group. -1. Finally, choose the [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) that send you alerts. +1. Finally, choose the [notifiers](../../explanations/integrations-in-rancher/notifiers.md) that send you alerts. - You can set up multiple notifiers. - You can change notifier recipients on the fly. @@ -202,7 +202,7 @@ You can disable these advanced options when configuring a specific rule. # Metric Expression Alerts _Available as of v2.2.4_ -If you enable [project monitoring]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/#monitoring), this alert type monitors for the overload from Prometheus expression querying. +If you enable [project monitoring](../../pages-for-subheaders/project-tools.md#monitoring), this alert type monitors for the overload from Prometheus expression querying. Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. 
diff --git a/versioned_docs/version-2.0-2.4/project-admin/tools/project-logging/project-logging.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-logging.md similarity index 80% rename from versioned_docs/version-2.0-2.4/project-admin/tools/project-logging/project-logging.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-logging.md index c4c54f55e3d..ecf521ad4d7 100644 --- a/versioned_docs/version-2.0-2.4/project-admin/tools/project-logging/project-logging.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-logging.md @@ -12,7 +12,7 @@ aliases: Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. -For background information about how logging integrations work, refer to the [cluster administration section.]({{}}/rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/#how-logging-integrations-work) +For background information about how logging integrations work, refer to the [cluster administration section.](logging/v2.0.x-v2.4.x/cluster-logging/#how-logging-integrations-work) Rancher supports the following services: @@ -24,7 +24,7 @@ Rancher supports the following services: >**Note:** You can only configure one logging service per cluster or per project. -Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure Rancher to send Kubernetes logs to a logging service. 
+Only [administrators](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure Rancher to send Kubernetes logs to a logging service. # Requirements @@ -48,7 +48,7 @@ Setting up a logging service to collect logs from your cluster/project has sever You can configure logging at either cluster level or project level. -- [Cluster logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. +- [Cluster logging](cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md), it also writes logs for all the Kubernetes system components. - Project logging writes logs for every pod in that particular project. @@ -66,11 +66,11 @@ Logs that are sent to your logging service are from the following locations: 1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. 
Rancher supports the following services: - - [Elasticsearch]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/fluentd/) + - [Elasticsearch](cluster-admin/tools/logging/elasticsearch/) + - [Splunk](cluster-admin/tools/logging/splunk/) + - [Kafka](cluster-admin/tools/logging/kafka/) + - [Syslog](cluster-admin/tools/logging/syslog/) + - [Fluentd](cluster-admin/tools/logging/fluentd/) 1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.1/hardening-2.1/hardening-2.1.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md similarity index 99% rename from versioned_docs/version-2.0-2.4/security/rancher-2.1/hardening-2.1/hardening-2.1.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md index aedd034a598..2be7b2deefe 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.1/hardening-2.1/hardening-2.1.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md @@ -18,7 +18,7 @@ Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Hardening_Guide.pdf) -For more detail on how a hardened 
cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.1/). +For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x](self-assessment-guide-with-cis-v1.3-benchmark.md). ### Profile Definitions diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.1/benchmark-2.1/benchmark-2.1.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md similarity index 100% rename from versioned_docs/version-2.0-2.4/security/rancher-2.1/benchmark-2.1/benchmark-2.1.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.2/hardening-2.2/hardening-2.2.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md similarity index 99% rename from versioned_docs/version-2.0-2.4/security/rancher-2.2/hardening-2.2/hardening-2.2.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md index 768e53eccac..9d94cad8151 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.2/hardening-2.2/hardening-2.2.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md @@ -18,7 +18,7 @@ Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.1 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Hardening_Guide.pdf) -For more detail about evaluating a hardened cluster against the official CIS
benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.2/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x](self-assessment-guide-with-cis-v1.4-benchmark.md). ### Profile Definitions diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.2/benchmark-2.2/benchmark-2.2.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md similarity index 100% rename from versioned_docs/version-2.0-2.4/security/rancher-2.2/benchmark-2.2/benchmark-2.2.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/hardening-2.3.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md similarity index 99% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/hardening-2.3.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md index 7f77f125450..ddf33d55f94 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/hardening-2.3.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md @@ -17,7 +17,7 @@ Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.1 [Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf) -For more detail
about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x](rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md). ### Profile Definitions diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/benchmark-2.3.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md similarity index 100% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/benchmark-2.3.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/hardening-2.3.3.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md similarity index 99% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/hardening-2.3.3.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md index cb7949895a4..e142b73e328 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/hardening-2.3.3.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md @@ -18,7 +18,7 @@ Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.
[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Hardening_Guide.pdf) -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.3/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3](rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md). ### Profile Definitions diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/benchmark-2.3.3.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md similarity index 100% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/benchmark-2.3.3.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/hardening-2.3.5.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md similarity index 99% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/hardening-2.3.5.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md index 7065ae7b971..cafaf6f8b60 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/hardening-2.3.5.md +++
b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md @@ -23,7 +23,7 @@ Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15 This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.5]({{< baseurl >}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.5/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.5](rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md). #### Known Issues diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/benchmark-2.3.5.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md similarity index 100% rename from versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/benchmark-2.3.5.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.4/hardening-2.4/hardening-2.4.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md similarity index 99% rename from versioned_docs/version-2.0-2.4/security/rancher-2.4/hardening-2.4/hardening-2.4.md rename to
versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md index 31f5017acab..2b3ee57ba71 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.4/hardening-2.4/hardening-2.4.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md @@ -23,7 +23,7 @@ Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 This document provides prescriptive guidance for hardening a production installation of Rancher v2.4 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.4]({{< baseurl >}}/rancher/v2.0-v2.4/en/security/benchmark-2.4/). +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.4](self-assessment-guide-with-cis-v1.5-benchmark.md).
#### Known Issues diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.4/benchmark-2.4/benchmark-2.4.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md similarity index 100% rename from versioned_docs/version-2.0-2.4/security/rancher-2.4/benchmark-2.4/benchmark-2.4.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/security/cve/cve.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/security-advisories-and-cves.md similarity index 98% rename from versioned_docs/version-2.0-2.4/security/cve/cve.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/security-advisories-and-cves.md index 41699809bdf..9d93aee20d1 100644 --- a/versioned_docs/version-2.0-2.4/security/cve/cve.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/security-advisories-and-cves.md @@ -10,7 +10,7 @@ Rancher is committed to informing the community of security issues in our produc | [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server, i.e. local server, and return the requested information. You are vulnerable if you are running any Rancher 2.x version. Only valid Rancher users who have some level of permission on the cluster can perform the request. 
There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | | [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher where users were granted access to resources regardless of the resource's API group. For example Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. You are vulnerable if you are running any Rancher 2.x version. The extent of the exploit increases if there are other matching CRD resources installed in the cluster. There is no direct mitigation besides upgrading to the patched versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | | [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. Any Rancher user that was logged-in and aware of a cloud credential ID that was valid for a given cloud provider could make requests against that cloud provider's API through the proxy API, and the cloud credential would be attached. You are vulnerable if you are running any Rancher 2.2.0 or above and use cloud credentials. The exploit is limited to valid Rancher users. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. 
| 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/). | +| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions](upgrades/rollbacks/). | | [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | | [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. 
| 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | | [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | diff --git a/versioned_docs/version-2.0-2.4/admin-settings/rke-templates/example-yaml/example-yaml.md b/versioned_docs/version-2.0-2.4/reference-guides/rke1-template-example-yaml.md similarity index 100% rename from versioned_docs/version-2.0-2.4/admin-settings/rke-templates/example-yaml/example-yaml.md rename to versioned_docs/version-2.0-2.4/reference-guides/rke1-template-example-yaml.md diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/advanced/advanced.md b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/advanced-options.md similarity index 91% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/advanced/advanced.md rename to versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/advanced-options.md index a924970a464..538d1be925a 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/advanced/advanced.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/advanced-options.md @@ -3,7 +3,7 @@ title: Advanced Options for Docker Installs weight: 5 --- -When installing Rancher, there are several 
[advanced options]({{}}/rancher/v2.0-v2.4/en/installation/options/) that can be enabled: +When installing Rancher, there are several [advanced options](installation/options/) that can be enabled: - [Custom CA Certificate](#custom-ca-certificate) - [API Audit Log](#api-audit-log) @@ -39,7 +39,7 @@ The API Audit Log records all the user and system transactions made through Ranc The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. -See [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) for more information and options. +See [API Audit Log](installation/api-auditing) for more information and options. ``` docker run -d --restart=unless-stopped \ @@ -62,7 +62,7 @@ docker run -d --restart=unless-stopped \ rancher/rancher:latest ``` -See [TLS settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/tls-settings) for more information and options. +See [TLS settings](admin-settings/tls-settings) for more information and options. 
### Air Gap diff --git a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/proxy/proxy.md b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md similarity index 94% rename from versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/proxy/proxy.md rename to versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md index d21818b4c63..f8884542ba2 100644 --- a/versioned_docs/version-2.0-2.4/installation/other-installation-methods/single-node-docker/proxy/proxy.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md @@ -20,7 +20,7 @@ Make sure `NO_PROXY` contains the network addresses, network address ranges and ## Docker Installation -Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation]({{}}/rancher/v2.0-v2.4/en/installation/single-node-install/) are: +Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. 
Required values for `NO_PROXY` in a [Docker Installation](installation/single-node-install/) are: - `localhost` - `127.0.0.1` diff --git a/versioned_docs/version-2.0-2.4/system-tools/system-tools.md b/versioned_docs/version-2.0-2.4/reference-guides/system-tools.md similarity index 83% rename from versioned_docs/version-2.0-2.4/system-tools/system-tools.md rename to versioned_docs/version-2.0-2.4/reference-guides/system-tools.md index b6b97e78ad6..d177aa1f161 100644 --- a/versioned_docs/version-2.0-2.4/system-tools/system-tools.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/system-tools.md @@ -3,7 +3,7 @@ title: System Tools weight: 22 --- -System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters or [installations of Rancher on an RKE cluster.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) The tasks include: +System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters or [installations of Rancher on an RKE cluster.](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) The tasks include: * Collect logging and system metrics from nodes. * Remove Kubernetes resources created by Rancher. @@ -41,7 +41,7 @@ After you download the tools, complete the following actions: # Logs -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). See [Troubleshooting]({{}}//rancher/v2.0-v2.4/en/troubleshooting/) for a list of core Kubernetes cluster components. 
+The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters](../pages-for-subheaders/launch-kubernetes-with-rancher.md) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) See [Troubleshooting](../troubleshooting.md) for a list of core Kubernetes cluster components. System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. @@ -61,7 +61,7 @@ The following are the options for the logs command: # Stats -The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) or nodes in an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/).
Make sure you have created a [backup of etcd](backups/backups) before executing the command. When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: @@ -101,7 +101,7 @@ When you install Rancher on a Kubernetes cluster, it will create Kubernetes reso When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.0-v2.4/en/backups/backups) before executing the command. +>**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd](backups/backups) before executing the command. ``` ./system-tools remove --kubeconfig --namespace diff --git a/versioned_docs/version-2.0-2.4/user-settings/api-keys/api-keys.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/api-keys.md similarity index 92% rename from versioned_docs/version-2.0-2.4/user-settings/api-keys/api-keys.md rename to versioned_docs/version-2.0-2.4/reference-guides/user-settings/api-keys.md index e3c72a54a86..258d0488163 100644 --- a/versioned_docs/version-2.0-2.4/user-settings/api-keys/api-keys.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/api-keys.md @@ -32,7 +32,7 @@ API Keys are composed of four components: _Available as of v2.4.6_ Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period. - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. 
If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. + A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) for more information. 4. Click **Create**. @@ -46,7 +46,7 @@ API Keys are composed of four components: - Enter your API key information into the application that will send requests to the Rancher API. - Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. -- API keys are used for API calls and [Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli). +- API keys are used for API calls and [Rancher CLI](../../pages-for-subheaders/cli-with-rancher.md). 
## Deleting API Keys diff --git a/versioned_docs/version-2.0-2.4/user-settings/cloud-credentials/cloud-credentials.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-cloud-credentials.md similarity index 70% rename from versioned_docs/version-2.0-2.4/user-settings/cloud-credentials/cloud-credentials.md rename to versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-cloud-credentials.md index 3040c1e825e..6d95c0f9ca2 100644 --- a/versioned_docs/version-2.0-2.4/user-settings/cloud-credentials/cloud-credentials.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-cloud-credentials.md @@ -5,7 +5,7 @@ weight: 7011 _Available as of v2.2.0_ -When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. +When you create a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. 
@@ -13,7 +13,7 @@ Cloud credentials are only used by node templates if there are fields marked as You can create cloud credentials in two contexts: -- [During creation of a node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. +- [During creation of a node template](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for a cluster. - In the **User Settings** All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. @@ -23,11 +23,11 @@ All cloud credentials are bound to the user profile of who created it. They **ca 1. From your user settings, select **User Avatar > Cloud Credentials**. 1. Click **Add Cloud Credential**. 1. Enter a name for the cloud credential. -1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/) in Rancher. +1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown are based on the `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) in Rancher. 1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. 1. Click **Create**. -**Result:** The cloud credential is created and can immediately be used to [create node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). +**Result:** The cloud credential is created and can immediately be used to [create node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates). ## Updating a Cloud Credential @@ -37,11 +37,11 @@ When access credentials are changed or compromised, updating a cloud credential 1.
Choose the cloud credential you want to edit and click the **⋮ > Edit**. 1. Update the credential information and click **Save**. -**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/). +**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). ## Deleting a Cloud Credential -In order to delete cloud credentials, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates]({{}}/rancher/v2.0-v2.4/en/user-settings/node-templates/#deleting-a-node-template) that are still associated to that cloud credential. +In order to delete cloud credentials, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates](manage-node-templates.md#deleting-a-node-template) that are still associated to that cloud credential. 1. From your user settings, select **User Avatar > Cloud Credentials**. 1. You can either individually delete a cloud credential or bulk delete. 
diff --git a/versioned_docs/version-2.0-2.4/user-settings/node-templates/node-templates.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-node-templates.md similarity index 59% rename from versioned_docs/version-2.0-2.4/user-settings/node-templates/node-templates.md rename to versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-node-templates.md index abda0996aba..31777be5f96 100644 --- a/versioned_docs/version-2.0-2.4/user-settings/node-templates/node-templates.md +++ b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-node-templates.md @@ -3,9 +3,9 @@ title: Managing Node Templates weight: 7010 --- -When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: +When you provision a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: -- While [provisioning a node pool cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools). +- While [provisioning a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). - At any time, from your [user settings](#creating-a-node-template-from-user-settings). When you create a node template, it is bound to your user profile. 
Node templates cannot be shared among users. You can delete stale node templates that you no longer user from your user settings. @@ -16,14 +16,14 @@ When you create a node template, it is bound to your user profile. Node template 1. Click **Add Template**. 1. Select one of the cloud providers available. Then follow the instructions on screen to configure the template. -**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools). +**Result:** The template is configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). ## Updating a Node Template 1. From your user settings, select **User Avatar > Node Templates**. 1. Choose the node template that you want to edit and click the **⋮ > Edit**. - > **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. + > **Note:** As of v2.2.0, the default `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) and any node driver, that has fields marked as `password`, are required to use [cloud credentials](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#cloud-credentials). 
If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. 1. Edit the required information and click **Save**. @@ -37,7 +37,7 @@ When creating new node templates from your user settings, you can clone an exist 1. Find the template you want to clone. Then select **⋮ > Clone**. 1. Complete the rest of the form. -**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools). +**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). ## Deleting a Node Template diff --git a/versioned_docs/version-2.0-2.4/user-settings/preferences/preferences.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/user-preferences.md similarity index 100% rename from versioned_docs/version-2.0-2.4/user-settings/preferences/preferences.md rename to versioned_docs/version-2.0-2.4/reference-guides/user-settings/user-preferences.md diff --git a/versioned_docs/version-2.0-2.4/v1.6-migration/run-migration-tool/migration-tools-ref/migration-tools-ref.md b/versioned_docs/version-2.0-2.4/reference-guides/v1.6-migration/migration-tools-cli-reference.md similarity index 100% rename from versioned_docs/version-2.0-2.4/v1.6-migration/run-migration-tool/migration-tools-ref/migration-tools-ref.md rename to versioned_docs/version-2.0-2.4/reference-guides/v1.6-migration/migration-tools-cli-reference.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md index 5897146658c..93db3d94148 100644 --- 
a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md +++ b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md @@ -7,7 +7,7 @@ aliases: ### Self Assessment Guide -This [guide](./benchmark-2.3) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: +This [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- @@ -15,7 +15,7 @@ Self Assessment Guide v2.3 | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kuber ### Hardening Guide -This hardening [guide](./hardening-2.3) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: +This hardening [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md index 98c78426c60..8eb00861b0d 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md +++ b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md @@ -7,7 +7,7 @@ aliases: ### Self Assessment Guide -This [guide](./benchmark-2.3.3) 
corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: +This [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- @@ -15,7 +15,7 @@ Self Assessment Guide v2.3.3 | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kuberne ### Hardening Guide -This hardening [guide](./hardening-2.3.3) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: +This hardening [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md index e6b4582af91..b0351ac0549 100644 --- a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md +++ b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md @@ -7,7 +7,7 @@ aliases: ### Self Assessment Guide -This [guide](./benchmark-2.3.5) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: +This [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md) corresponds 
to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ---------------------------|----------|---------|-------|----- @@ -15,7 +15,7 @@ Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kuberne ### Hardening Guide -This hardening [guide](./hardening-2.3.5) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: +This hardening [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ------------------------|----------------|-----------------------|------------------ diff --git a/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md b/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md index 6cca088dcb8..d709c5be32d 100644 --- a/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md +++ b/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md @@ -5,4 +5,4 @@ aliases: - /rancher/v2.x/en/security/security-scan/ --- -The documentation about CIS security scans has moved [here.]({{}}/rancher/v2.0-v2.4/en/cis-scans) +The documentation about CIS security scans has moved [here.](cis-scans) diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/troubleshooting.md b/versioned_docs/version-2.0-2.4/troubleshooting.md similarity index 51% rename from versioned_docs/version-2.0-2.4/troubleshooting/troubleshooting.md rename to versioned_docs/version-2.0-2.4/troubleshooting.md index fca31a2b073..269c396b2ee 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/troubleshooting.md +++ 
b/versioned_docs/version-2.0-2.4/troubleshooting.md @@ -5,7 +5,7 @@ weight: 26 This section contains information to help you troubleshoot issues when using Rancher. -- [Kubernetes components]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/) +- [Kubernetes components](pages-for-subheaders/kubernetes-components.md) If you need help troubleshooting core Kubernetes cluster components like: * `etcd` @@ -16,27 +16,27 @@ This section contains information to help you troubleshoot issues when using Ran * `kube-proxy` * `nginx-proxy` -- [Kubernetes resources]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/) +- [Kubernetes resources](troubleshooting/other-troubleshooting-tips/kubernetes-resources.md) Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. -- [Networking]({{}}/rancher/v2.0-v2.4/en/troubleshooting/networking/) +- [Networking](troubleshooting/other-troubleshooting-tips/networking.md) Steps to troubleshoot networking issues can be found here. -- [DNS]({{}}/rancher/v2.0-v2.4/en/troubleshooting/dns/) +- [DNS](troubleshooting/other-troubleshooting-tips/dns.md) When you experience name resolution issues in your cluster. 
-- [Troubleshooting Rancher installed on Kubernetes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/rancherha/) +- [Troubleshooting Rancher installed on Kubernetes](troubleshooting/other-troubleshooting-tips/rancher-ha.md) - If you experience issues with your [Rancher server installed on Kubernetes]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) + If you experience issues with your [Rancher server installed on Kubernetes](pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) -- [Imported clusters]({{}}/rancher/v2.0-v2.4/en/troubleshooting/imported-clusters/) +- [Imported clusters](troubleshooting/other-troubleshooting-tips/registered-clusters.md) - If you experience issues when [Importing Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) + If you experience issues when [Importing Kubernetes Clusters](how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) -- [Logging]({{}}/rancher/v2.0-v2.4/en/troubleshooting/logging/) +- [Logging](troubleshooting/other-troubleshooting-tips/logging.md) Read more about what log levels can be configured and how to configure a log level. diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/kubernetes-components.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/kubernetes-components.md deleted file mode 100644 index 5754da979d0..00000000000 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/kubernetes-components.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Kubernetes Components -weight: 100 ---- - -The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters. 
- -This section includes troubleshooting tips in the following categories: - -- [Troubleshooting etcd Nodes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/etcd) -- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/controlplane) -- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/nginx-proxy) -- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/worker-and-generic) - -# Kubernetes Component Diagram - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/controlplane/controlplane.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md similarity index 91% rename from versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/controlplane/controlplane.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md index 3a3ca045c89..5fc74e0aae7 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/controlplane/controlplane.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -29,7 +29,7 @@ bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." # Controlplane Container Logging -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. +> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election](../other-troubleshooting-tips/kubernetes-resources.md#kubernetes-leader-election) how to retrieve the current leader. The logging of the containers can contain information on what the problem could be. 
diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/etcd/etcd.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md similarity index 100% rename from versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/etcd/etcd.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/nginx-proxy/nginx-proxy.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md similarity index 100% rename from versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/nginx-proxy/nginx-proxy.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/worker-and-generic/worker-and-generic.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md similarity index 100% rename from versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/worker-and-generic/worker-and-generic.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/dns/dns.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md similarity index 92% rename from versioned_docs/version-2.0-2.4/troubleshooting/dns/dns.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md index 169b5d84104..8a91d3cba4c 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/dns/dns.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md @@ -7,7 +7,7 @@ The commands/steps listed on this page can be used to check 
name resolution issu Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. -Before running the DNS checks, check the [default DNS provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{}}/rancher/v2.0-v2.4/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. +Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. ### Check if DNS pods are running @@ -196,7 +196,7 @@ services: > **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. -See [Editing Cluster as YAML]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/#editing-clusters-with-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: +See [Editing Cluster as YAML](../../pages-for-subheaders/cluster-configuration.md#editing-clusters-with-yaml) how to apply this change. 
When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: ``` kubectl delete pods -n kube-system -l k8s-app=kube-dns diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-resources/kubernetes-resources.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md similarity index 97% rename from versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-resources/kubernetes-resources.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md index dc50f14d30b..ea701c2c0df 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-resources/kubernetes-resources.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -3,7 +3,7 @@ title: Kubernetes resources weight: 101 --- -The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters. +The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
@@ -266,6 +266,6 @@ kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .statu ### Job does not complete -If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.]({{}}/rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/#excluding-workloads-from-being-injected-with-the-istio-sidecar) +If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps](../../explanations/integrations-in-rancher/istio/setup/enable-istio-in-namespace.md#excluding-workloads-from-being-injected-with-the-istio-sidecar). Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh.
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/logging/logging.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md similarity index 100% rename from versioned_docs/version-2.0-2.4/troubleshooting/logging/logging.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/networking/networking.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md similarity index 90% rename from versioned_docs/version-2.0-2.4/troubleshooting/networking/networking.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md index 99d67e8846c..7b3a675cdea 100644 --- a/versioned_docs/version-2.0-2.4/troubleshooting/networking/networking.md +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md @@ -9,7 +9,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG ### Double check if all the required ports are opened in your (host) firewall -Double check if all the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. +Double check if all the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. ### Check if overlay network is functioning correctly The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. 
If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. @@ -86,7 +86,7 @@ To test the overlay network, you can launch the following `DaemonSet` definition wk1 can reach wk1 => End network overlay test ``` - If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. This could be because the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened for `wk2`. + If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. This could be because the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for overlay networking are not opened for `wk2`. 6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. @@ -125,7 +125,7 @@ If there is no output, the cluster is not affected. |------------|------------| | GitHub issue | [#15146](https://github.com/rancher/rancher/issues/15146) | -If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/namespace-migration/) to restore connectivity. Symptoms include: +If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration](../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md) to restore connectivity. Symptoms include: - NGINX ingress controller showing `504 Gateway Time-out` when accessed.
- NGINX ingress controller logging `upstream timed out (110: Connection timed out) while connecting to upstream` when accessed. diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/rancherha/rancherha.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md similarity index 100% rename from versioned_docs/version-2.0-2.4/troubleshooting/rancherha/rancherha.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/imported-clusters/imported-clusters.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md similarity index 100% rename from versioned_docs/version-2.0-2.4/troubleshooting/imported-clusters/imported-clusters.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md diff --git a/versioned_docs/version-2.0-2.4/user-settings/user-settings.md b/versioned_docs/version-2.0-2.4/user-settings/user-settings.md deleted file mode 100644 index 0e15e7008b0..00000000000 --- a/versioned_docs/version-2.0-2.4/user-settings/user-settings.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: User Settings -weight: 23 -aliases: - - /rancher/v2.0-v2.4/en/tasks/user-settings/ ---- - -Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. - -![User Settings Menu]({{}}/img/rancher/user-settings.png) - -The available user settings are: - -- [API & Keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. 
-- [Cloud Credentials]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters). Note: Available as of v2.2.0. -- [Node Templates]({{}}/rancher/v2.0-v2.4/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters). -- [Preferences]({{}}/rancher/v2.0-v2.4/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. -- Log Out: Ends your user session.