diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000000..5d49595fbf2
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,8 @@
+version: 2
+
+updates:
+ - package-ecosystem: gitsubmodule
+ schedule:
+ interval: "daily"
+ directory: /
+
\ No newline at end of file
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 0f835da48dd..d3ec3503503 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -10,7 +10,7 @@ Fixes #[issue_number]
- Verify if changes pertain to other versions of Rancher. If they do, finalize the edits on one version of the page, then apply the edits to the other versions.
-- If the pull request is dependent on an upcoming release, make sure to target the release branch instead of `main`.
+- If the pull request is dependent on an upcoming release, remember to add a "MERGE ON RELEASE" label and set the proper milestone.
## Description
@@ -24,4 +24,4 @@ Fixes #[issue_number]
\ No newline at end of file
+-->
diff --git a/.github/styles/suse-vale-styleguide b/.github/styles/suse-vale-styleguide
index 06f144fdfc7..45136e8ea14 160000
--- a/.github/styles/suse-vale-styleguide
+++ b/.github/styles/suse-vale-styleguide
@@ -1 +1 @@
-Subproject commit 06f144fdfc78c769a9d86db5a6c550d7e6fc17da
+Subproject commit 45136e8ea14ebbe0851ae87791f068507b02d636
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index f228eefae5f..c16494a853b 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -4,16 +4,18 @@ on:
push:
branches:
- main
+ paths-ignore:
+ - '**/README.md'
jobs:
- deploy:
- name: Deploy to GitHub Pages
+ build:
+ name: Build Docusaurus
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- - uses: actions/setup-node@v3
+ - uses: actions/setup-node@v4
with:
node-version: 18
cache: yarn
@@ -25,18 +27,25 @@ jobs:
NODE_OPTIONS: "--max_old_space_size=7168"
run: yarn build --no-minify
- # Popular action to deploy to GitHub Pages:
- # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
- - name: Deploy to GitHub Pages
- uses: peaceiris/actions-gh-pages@v3
+ - name: Upload Build Artifact
+ uses: actions/upload-pages-artifact@v3
with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- # Build output to publish to the `gh-pages` branch:
- publish_dir: ./build
- # The following lines assign commit authorship to the official
- # GH-Actions bot for deploys to `gh-pages` branch:
- # https://github.com/actions/checkout/issues/13#issuecomment-724415212
- # The GH actions bot is used by default if you didn't specify the two fields.
- # You can swap them out with your own user credentials.
- user_name: github-actions[bot]
- user_email: 41898282+github-actions[bot]@users.noreply.github.com
\ No newline at end of file
+ path: build
+
+ deploy:
+ name: Deploy to GitHub Pages
+ needs: build
+
+ permissions:
+ pages: write
+ id-token: write
+
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+
+ runs-on: ubuntu-latest
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/.github/workflows/test-deploy.yml b/.github/workflows/test-deploy.yml
index 39e8fa97909..807286e11b6 100644
--- a/.github/workflows/test-deploy.yml
+++ b/.github/workflows/test-deploy.yml
@@ -2,16 +2,18 @@ name: Test deployment
on:
pull_request:
- branches:
- - main
+ paths-ignore:
+ - '**/README.md'
jobs:
test-deploy:
name: Test deployment
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-node@v3
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - uses: actions/setup-node@v4
with:
node-version: 18
cache: yarn
diff --git a/.github/workflows/vale.yml b/.github/workflows/vale.yml
index bdd2a453b3c..b004ab9d083 100644
--- a/.github/workflows/vale.yml
+++ b/.github/workflows/vale.yml
@@ -5,7 +5,10 @@
-# It uses Vale (https://vale.sh/docs/vale-cli/installation/) to provide feedback base off the SUSE Style Guide / OpenSUSE style rules (https://github.com/openSUSE/suse-vale-styleguide)
+# It uses Vale (https://vale.sh/docs/vale-cli/installation/) to provide feedback based on the SUSE Style Guide / OpenSUSE style rules (https://github.com/openSUSE/suse-vale-styleguide)
name: Style check
-on: [pull_request]
+on:
+ pull_request:
+ paths-ignore:
+ - '**/README.md'
jobs:
vale-lint:
diff --git a/.vale.ini b/.vale.ini
index 84cf4b55591..b4d0a68741e 100644
--- a/.vale.ini
+++ b/.vale.ini
@@ -1,7 +1,7 @@
-StylesPath = .github/styles
+StylesPath = .github/styles/suse-vale-styleguide
-[formtats]
+[formats]
mdx = md
[*.md]
-BasedOnStyles = suse-vale-styleguide
\ No newline at end of file
+BasedOnStyles = common
\ No newline at end of file
diff --git a/README.md b/README.md
index ca3db799c77..990554e68b0 100644
--- a/README.md
+++ b/README.md
@@ -15,9 +15,9 @@ To get started, [fork](https://github.com/rancher/rancher-docs/fork) and clone t
Our repository doesn't allow you to make changes directly to the `main` branch. Create a working branch and make pull requests from your fork to [rancher/rancher-docs](https://github.com/rancher/rancher-docs).
-For most updates, you'll need to edit a file in the `/docs` directory, which represents the ["Latest"](https://ranchermanager.docs.rancher.com/) version of our published documentation. The "Latest" version is a mirror of the most recently released version of Rancher. As of December 2023, the most recently released version of Rancher is 2.8.
+For most updates, you'll need to edit a file in the `/docs` directory, which represents the ["Latest"](https://ranchermanager.docs.rancher.com/) version of our published documentation. The "Latest" version is a mirror of the most recently released version of Rancher. As of August 2024, the most recently released version of Rancher is 2.9.
-Whenever an update is made to `/docs`, you should apply the same change to the corresponding file in `/versioned_docs/version-2.8`. If a change only affects older versions, you don't need to mirror it to the `/docs` directory.
+Whenever an update is made to `/docs`, you should apply the same change to the corresponding file in `/versioned_docs/version-2.9`. If a change only affects older versions, you don't need to mirror it to the `/docs` directory.
If a file is moved or renamed, you'll also need to edit the `sidebars.js` files for each affected version, as well as the list of redirects in `docusaurus.config.js`. See [Moving or Renaming Docs](./moving-or-renaming-docs.md).
diff --git a/docs/api/api-reference.mdx b/docs/api/api-reference.mdx
index 2ae392ecf47..242d5892e5e 100644
--- a/docs/api/api-reference.mdx
+++ b/docs/api/api-reference.mdx
@@ -1,5 +1,6 @@
---
title: API Reference
+hide_table_of_contents: true
---
diff --git a/docs/reference-guides/about-the-api/api-tokens.md b/docs/api/api-tokens.md
similarity index 81%
rename from docs/reference-guides/about-the-api/api-tokens.md
rename to docs/api/api-tokens.md
index 0568de9e149..877e798ceee 100644
--- a/docs/reference-guides/about-the-api/api-tokens.md
+++ b/docs/api/api-tokens.md
@@ -1,11 +1,13 @@
---
-title: API Tokens
+title: Using API Tokens
---
-
+
+Rancher v2.8.0 introduced the [Rancher Kubernetes API](./api-reference.mdx) which can be used to manage Rancher resources through `kubectl`. This page covers information on API tokens used with the [Rancher CLI](../reference-guides/cli-with-rancher/cli-with-rancher.md), [kubeconfig files](../how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md#about-the-kubeconfig-file), Terraform and the [v3 API browser](./v3-rancher-api-guide.md#enable-view-in-api).
+
By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. Tokens are not invalidated by changing a password.
You can deactivate API tokens by deleting them or by deactivating the user account.
@@ -43,13 +45,11 @@ This setting is used by all kubeconfig tokens except those created by the CLI to
## Disable Tokens in Generated Kubeconfigs
-Set the `kubeconfig-generate-token` setting to `false`. This setting instructs Rancher to no longer automatically generate a token when a user clicks on download a kubeconfig file. When this setting is deactivated, a generated kubeconfig references the [Rancher CLI](../cli-with-rancher/kubectl-utility.md#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) to retrieve a short-lived token for the cluster. When this kubeconfig is used in a client, such as `kubectl`, the Rancher CLI needs to be installed to complete the log in request.
+Set the `kubeconfig-generate-token` setting to `false`. This setting instructs Rancher to no longer automatically generate a token when a user clicks on download a kubeconfig file. When this setting is deactivated, a generated kubeconfig references the [Rancher CLI](../reference-guides/cli-with-rancher/kubectl-utility.md#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) to retrieve a short-lived token for the cluster. When this kubeconfig is used in a client, such as `kubectl`, the Rancher CLI needs to be installed to complete the log in request.
## Token Hashing
-Users can enable token hashing, where tokens undergo a one-way hash using the SHA256 algorithm. This is a non-reversible process: once enabled, this feature cannot be disabled. It is advisable to take backups prior to enabling and/or evaluating in a test environment first.
-
-To enable token hashing, refer to [this section](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md).
+You can [enable token hashing](../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md), where tokens undergo a one-way hash using the SHA256 algorithm. This is a non-reversible process: once enabled, this feature cannot be disabled. You should first evaluate this setting in a test environment, and/or take backups before enabling.
This feature affects all tokens which include, but are not limited to, the following:
@@ -82,4 +82,4 @@ Maximum Time to Live (TTL) in minutes allowed for auth tokens. If a user attempt
### kubeconfig-generate-token
-When true, kubeconfigs requested through the UI contain a valid token. When false, kubeconfigs contain a command that uses the Rancher CLI to prompt the user to log in. [The CLI then retrieves and caches a token for the user](../cli-with-rancher/kubectl-utility.md#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl).
+When true, kubeconfigs requested through the UI contain a valid token. When false, kubeconfigs contain a command that uses the Rancher CLI to prompt the user to log in. [The CLI then retrieves and caches a token for the user](../reference-guides/cli-with-rancher/kubectl-utility.md#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl).
diff --git a/docs/api/quickstart.md b/docs/api/quickstart.md
index 9ec6a5b4611..5bb3057183e 100644
--- a/docs/api/quickstart.md
+++ b/docs/api/quickstart.md
@@ -1,12 +1,12 @@
---
-title: API Quick Start Guide
+title: RK-API Quick Start Guide
---
-You can access Rancher's resources through the Kubernetes API. This guide will help you get started on using this API as a Rancher user.
+You can access Rancher's resources through the Kubernetes API. This guide helps you get started on using this API as a Rancher user.
1. In the upper left corner, click **☰ > Global Settings**.
2. Find and copy the address in the `server-url` field.
@@ -129,7 +129,7 @@ To ensure that your tools can recognize Rancher's CA certificates, most setups r
If your Rancher instance is proxied by another service, you must extract the certificate that the service is using, and add it to the kubeconfig file, as demonstrated in step 5.
:::
-4. The following commands will convert `rancher.crt` to base64 output, trim all new-lines, and update the cluster in the kubeconfig with the certificate, then finishing by removing the `rancher.crt` file:
+4. The following commands convert `rancher.crt` to base64 output, trim all new-lines, and update the cluster in the kubeconfig with the certificate, then finish by removing the `rancher.crt` file:
```bash
export KUBECONFIG=$PATH_TO_RANCHER_KUBECONFIG
diff --git a/docs/api/v3-rancher-api-guide.md b/docs/api/v3-rancher-api-guide.md
new file mode 100644
index 00000000000..14bc92a5bed
--- /dev/null
+++ b/docs/api/v3-rancher-api-guide.md
@@ -0,0 +1,94 @@
+---
+title: Previous v3 Rancher API Guide
+---
+
+
+
+
+
+Rancher v2.8.0 introduced the Rancher Kubernetes API (RK-API). The previous v3 Rancher API is still available. This page describes the v3 API. For more information on RK-API, see the [RK-API quickstart](./quickstart.md) and [reference guide](./api-reference.mdx).
+
+## How to Use the API
+
+The previous v3 API has its own user interface accessible from a [web browser](#enable-view-in-api). This is an easy way to see resources, perform actions, and see the equivalent `curl` or HTTP request & response. To access it:
+
+
+
+
+1. Click your user avatar in the upper right corner.
+1. Click **Account & API Keys**.
+1. Under the **API Keys** section, find the **API Endpoint** field and click the link. The link looks something like `https://<RANCHER_FQDN>/v3`, where `<RANCHER_FQDN>` is the fully qualified domain name of your Rancher deployment.
+
+
+
+
+Go to the URL endpoint at `https://<RANCHER_FQDN>/v3`, where `<RANCHER_FQDN>` is the fully qualified domain name of your Rancher deployment.
+
+
+
+
+## Authentication
+
+API requests must include authentication information. Authentication is done with HTTP basic authentication using [API keys](../reference-guides/user-settings/api-keys.md). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) apply to these keys and restrict what clusters and projects the account can see and what actions they can take.
+
+By default, certain cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page](api-tokens.md).
+
+## Making Requests
+
+The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see further documentation](https://github.com/rancher/api-spec/blob/master/specification.md).
+
+- Every type has a Schema which describes:
+ - The URL to get to the collection of this type of resource.
+ - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc.
+ - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas).
+ - Every field that allows filtering.
+ - What HTTP verb methods are available for the collection itself, or for individual resources in the collection.
+
+The design allows you to load just the list of schemas and access everything about the API. The UI for the API contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and follow other `links` inside of the returned resources to get any other information.
+
+In practice, you may just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/<type>`) or get a specific resource (`/v3/<type>/<id>`). Anything deeper than that is subject to change in future releases.
+
+Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL where you can retrieve that information. Again, you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself.
+
+Most resources have actions, which do something or change the state of the resource. To use them, send a HTTP `POST` to the URL in the `actions` map of the action you want. Certain actions require input or produce output. See the individual documentation for each type or the schemas for specific information.
+
+To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored.
+
+To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource.
+
+To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/<type>`).
+
+## Filtering
+
+Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, for example, `field_gt=42` for "field is greater than 42." See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details.
+
+## Sorting
+
+Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified.
+
+## Pagination
+
+API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, for example, `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not.
+
+## Capturing v3 API Calls
+
+You can use browser developer tools to capture how the v3 API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster:
+
+1. In the Rancher UI, go to **Cluster Management** and click **Create.**
+1. Click one of the cluster types. This example uses Digital Ocean.
+1. Fill out the form with a cluster name and node template, but don't click **Create**.
+1. You need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click the Rancher UI and click **Inspect.**
+1. In the developer tools, click the **Network** tab.
+1. On the **Network** tab, make sure **Fetch/XHR** is selected.
+1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`.
+1. Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.**
+1. Paste the result into any text editor. You can see the POST request, including the URL it was sent to, all headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: the request should be stored in a safe place because it contains credentials.
+
+### Enable View in API
+
+You can also view captured v3 API calls for your respective clusters and resources. This feature is not enabled by default. To enable it:
+
+1. Click your **User Tile** in the top right corner of the UI and select **Preferences** from the drop-down menu.
+2. Under the **Advanced Features** section, click **Enable "View in API"**
+
+Once checked, the **View in API** link is displayed under the **⋮** sub-menu on resource pages in the UI.
diff --git a/docs/cluster-provisioning/rke-clusters/options/options.md b/docs/cluster-provisioning/rke-clusters/options/options.md
deleted file mode 100644
index da8df215bd2..00000000000
--- a/docs/cluster-provisioning/rke-clusters/options/options.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: RKE Cluster Configuration
----
-
-
-
-
-
-This page has moved [here.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)
diff --git a/docs/faq/container-network-interface-providers.md b/docs/faq/container-network-interface-providers.md
index c90060a6456..a3e40c8456a 100644
--- a/docs/faq/container-network-interface-providers.md
+++ b/docs/faq/container-network-interface-providers.md
@@ -96,7 +96,7 @@ Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` a
For more information, see the following pages:
-- [Weave Net Official Site](https://www.weave.works/)
+- [Weave Net Official Site](https://github.com/weaveworks/weave/blob/master/site/overview.md)
### RKE2 Kubernetes clusters
diff --git a/docs/faq/deprecated-features.md b/docs/faq/deprecated-features.md
index afda3bef11b..bef8d016578 100644
--- a/docs/faq/deprecated-features.md
+++ b/docs/faq/deprecated-features.md
@@ -6,21 +6,20 @@ title: Deprecated Features in Rancher
-### What is Rancher's deprecation policy?
+## What is Rancher's deprecation policy?
We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms).
-### Where can I find out which features have been deprecated in Rancher?
+## Where can I find out which features have been deprecated in Rancher?
Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features:
| Patch Version | Release Date |
|---------------|---------------|
-| [2.8.3](https://github.com/rancher/rancher/releases/tag/v2.8.3) | Mar 28, 2024 |
-| [2.8.2](https://github.com/rancher/rancher/releases/tag/v2.8.2) | Feb 8, 2024 |
-| [2.8.1](https://github.com/rancher/rancher/releases/tag/v2.8.1) | Jan 22, 2024 |
-| [2.8.0](https://github.com/rancher/rancher/releases/tag/v2.8.0) | Dec 6, 2023 |
+| [2.9.2](https://github.com/rancher/rancher/releases/tag/v2.9.2) | Sep 19, 2024 |
+| [2.9.1](https://github.com/rancher/rancher/releases/tag/v2.9.1) | Aug 26, 2024 |
+| [2.9.0](https://github.com/rancher/rancher/releases/tag/v2.9.0) | Jul 31, 2024 |
-### What can I expect when a feature is marked for deprecation?
+## What can I expect when a feature is marked for deprecation?
In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature.
\ No newline at end of file
diff --git a/docs/faq/dockershim.md b/docs/faq/dockershim.md
index 4e710c9746b..387b20b3eee 100644
--- a/docs/faq/dockershim.md
+++ b/docs/faq/dockershim.md
@@ -18,19 +18,19 @@ enable_cri_dockerd: true
For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24.
-### FAQ
+## FAQ
-Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim?
+Q: Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim?
The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on Rancher 2.6 or above to have support for RKE with Kubernetes 1.21. See our [support matrix](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/) for details.
-Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim?
+Q: I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim?
-A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and is not scheduled for removal upstream until Kubernetes 1.24. It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to Kubernetes 1.21 as you would normally, but should consider enabling the external Dockershim by Kubernetes 1.22. The external Dockershim will need to be enabled before upgrading to Kubernetes 1.24, at which point the existing implementation will be removed.
+A: The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and is not scheduled for removal upstream until Kubernetes 1.24. It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to Kubernetes 1.21 as you would normally, but should consider enabling the external Dockershim by Kubernetes 1.22. The external Dockershim will need to be enabled before upgrading to Kubernetes 1.24, at which point the existing implementation will be removed.
For more information on the deprecation and its timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed).
diff --git a/docs/faq/general-faq.md b/docs/faq/general-faq.md
index 146761ac85f..5930cb4ed7d 100644
--- a/docs/faq/general-faq.md
+++ b/docs/faq/general-faq.md
@@ -10,10 +10,6 @@ This FAQ is a work in progress designed to answer the questions most frequently
See the [Technical FAQ](technical-items.md) for frequently asked technical questions.
-## Does Rancher v2.x support Docker Swarm and Mesos as environment types?
-
-Swarm and Mesos are no longer selectable options when you create a new environment in Rancher v2.x. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 were running Swarm.
-
## Is it possible to manage Azure Kubernetes Services with Rancher v2.x?
Yes. See our [Cluster Administration](../how-to-guides/new-user-guides/manage-clusters/manage-clusters.md) guide for what Rancher features are available on AKS, as well as our [documentation on AKS](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md).
diff --git a/docs/faq/install-and-configure-kubectl.md b/docs/faq/install-and-configure-kubectl.md
index 9bcb56bc3d2..868eb42caac 100644
--- a/docs/faq/install-and-configure-kubectl.md
+++ b/docs/faq/install-and-configure-kubectl.md
@@ -8,11 +8,11 @@ title: Installing and Configuring kubectl
`kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x.
-### Installation
+## Installation
See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system.
-### Configuration
+## Configuration
When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`.
diff --git a/docs/faq/rancher-is-no-longer-needed.md b/docs/faq/rancher-is-no-longer-needed.md
index 3f825b0f048..1752bd29b37 100644
--- a/docs/faq/rancher-is-no-longer-needed.md
+++ b/docs/faq/rancher-is-no-longer-needed.md
@@ -9,11 +9,11 @@ title: Rancher is No Longer Needed
This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted.
-### If the Rancher server is deleted, what happens to the workloads in my downstream clusters?
+## If the Rancher server is deleted, what happens to the workloads in my downstream clusters?
If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal.
-### If the Rancher server is deleted, how do I access my downstream clusters?
+## If the Rancher server is deleted, how do I access my downstream clusters?
The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize:
@@ -21,7 +21,7 @@ The capability to access a downstream cluster without Rancher depends on the typ
- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials.
- **RKE clusters:** To access an [RKE cluster,](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed.
-### What if I don't want Rancher anymore?
+## What if I don't want Rancher anymore?
:::note
@@ -44,7 +44,7 @@ If you installed Rancher with Docker, you can uninstall Rancher by removing the
Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters)
-### What if I don't want my registered cluster managed by Rancher?
+## What if I don't want my registered cluster managed by Rancher?
If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher.
@@ -56,7 +56,7 @@ To detach the cluster,
**Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher.
-### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?
+## What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?
At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher.
diff --git a/docs/faq/security.md b/docs/faq/security.md
index 08fd8422730..165fea2ba91 100644
--- a/docs/faq/security.md
+++ b/docs/faq/security.md
@@ -1,21 +1,20 @@
---
title: Security FAQ
-
---
-### Is there a Hardening Guide?
+## Is there a Hardening Guide?
The Hardening Guide is located in the main [Security](../reference-guides/rancher-security/rancher-security.md) section.
-### Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results?
+## Have hardened Rancher Kubernetes clusters been evaluated by the CIS Kubernetes Benchmark? Where can I find the results?
We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../reference-guides/rancher-security/rancher-security.md) section.
-### How does Rancher verify communication with downstream clusters, and what are some associated security concerns?
+## How does Rancher verify communication with downstream clusters, and what are some associated security concerns?
Communication between the Rancher server and downstream clusters is performed through agents. Rancher uses either a registered certificate authority (CA) bundle or the local trust store to verify communication between Rancher agents and the Rancher server. Using a CA bundle for verification is more strict, as only the certificates based on that bundle are trusted. If TLS verification for a explicit CA bundle fails, Rancher may fall back to using the local trust store for verifying future communication. Any CA within the local trust store can then be used to generate a valid certificate.
diff --git a/docs/faq/technical-items.md b/docs/faq/technical-items.md
index 42bfe966726..20e714a4930 100644
--- a/docs/faq/technical-items.md
+++ b/docs/faq/technical-items.md
@@ -6,9 +6,10 @@ title: Technical FAQ
-### How can I reset the administrator password?
+## How can I reset the administrator password?
+
+Docker install:
-Docker Install:
```
$ docker exec -ti reset-password
New password for default administrator (user-xxxxx):
@@ -16,6 +17,7 @@ New password for default administrator (user-xxxxx):
```
Kubernetes install (Helm):
+
```
$ KUBECONFIG=./kube_config_cluster.yml
$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password
@@ -23,10 +25,10 @@ New password for default administrator (user-xxxxx):
```
+## I deleted/deactivated the last admin, how can I fix it?
+Docker install:
-### I deleted/deactivated the last admin, how can I fix it?
-Docker Install:
```
$ docker exec -ti ensure-default-admin
New default administrator (user-xxxxx)
@@ -35,38 +37,40 @@ New password for default administrator (user-xxxxx):
```
Kubernetes install (Helm):
+
```
$ KUBECONFIG=./kube_config_cluster.yml
$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin
New password for default administrator (user-xxxxx):
```
-### How can I enable debug logging?
+
+## How can I enable debug logging?
See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md)
-### My ClusterIP does not respond to ping
+## My ClusterIP does not respond to ping
ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds.
-### Where can I manage Node Templates?
+## Where can I manage Node Templates?
Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`.
-### Why is my Layer-4 Load Balancer in `Pending` state?
+## Why is my Layer-4 Load Balancer in `Pending` state?
The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)
-### Where is the state of Rancher stored?
+## Where is the state of Rancher stored?
- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`.
- Kubernetes install: in the etcd of the RKE cluster created to run Rancher.
-### How are the supported Docker versions determined?
+## How are the supported Docker versions determined?
We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md.
-### How can I access nodes created by Rancher?
+## How can I access nodes created by Rancher?
SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below.
@@ -78,14 +82,14 @@ Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host.
$ ssh -i id_rsa user@ip_of_node
```
-### How can I automate task X in Rancher?
+## How can I automate task X in Rancher?
The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this:
* Visit `https://your_rancher_ip/v3` and browse the API options.
* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like)
-### The IP address of a node changed, how can I recover?
+## The IP address of a node changed, how can I recover?
A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster.
@@ -93,11 +97,11 @@ When the IP address of the node changed, Rancher lost connection to the node, so
When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster.
-### How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster?
+## How can I add more arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster?
You can add more arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/).
-### How do I check if my certificate chain is valid?
+## How do I check if my certificate chain is valid?
Use the `openssl verify` command to validate your certificate chain:
@@ -138,7 +142,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com
issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA
```
-### How do I check `Common Name` and `Subject Alternative Names` in my server certificate?
+## How do I check `Common Name` and `Subject Alternative Names` in my server certificate?
Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications.
@@ -156,7 +160,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS
DNS:rancher.my.org
```
-### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed?
+## Why does it take 5+ minutes for a pod to be rescheduled when a node has failed?
This is due to a combination of the following default Kubernetes settings:
@@ -175,6 +179,6 @@ In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. Se
* `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration.
* `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration.
-### Can I use keyboard shortcuts in the UI?
+## Can I use keyboard shortcuts in the UI?
Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI.
diff --git a/docs/faq/telemetry.md b/docs/faq/telemetry.md
index edfcaebed4e..64bcee4090f 100644
--- a/docs/faq/telemetry.md
+++ b/docs/faq/telemetry.md
@@ -6,11 +6,11 @@ title: Telemetry FAQ
-### What is Telemetry?
+## What is Telemetry?
Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties.
-### What information is collected?
+## What information is collected?
No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected.
@@ -24,12 +24,12 @@ The primary things collected include:
- The image name & version of Rancher that is running.
- A unique randomly-generated identifier for this installation.
-### Can I see the information that is being sent?
+## Can I see the information that is being sent?
If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data.
If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at.
-### How do I turn it on or off?
+## How do I turn it on or off?
After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`.
diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
index b519ebf2761..a3b48a0814d 100644
--- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
+++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
@@ -12,7 +12,7 @@ These instructions assume you have already followed the instructions for a Kuber
:::
-### Rancher Helm Upgrade Options
+## Rancher Helm Upgrade Options
To upgrade with Helm, apply the same options that you used when installing Rancher. Refer to the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools.
diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
index 8d367774725..8ccdb43d1bf 100644
--- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
+++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
@@ -107,15 +107,15 @@ The Rancher management server is designed to be secure by default and requires S
:::note
-If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../installation-references/helm-chart-options.md#external-tls-termination).
+If you want to externally terminate SSL/TLS, see [TLS termination on an External Load Balancer](../installation-references/helm-chart-options.md#external-tls-termination). As outlined on that page, this option does have additional requirements for TLS verification.
:::
There are three recommended options for the source of the certificate used for TLS termination at the Rancher server:
-- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate.
-- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet.
-- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher.
+- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. No extra action is needed when `agent-tls-mode` is set to `strict`. More information can be found on this setting in [Agent TLS Enforcement](../installation-references/tls-settings.md#agent-tls-enforcement).
+- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. When setting `agent-tls-mode` to `strict`, you must also specify `--privateCA=true` and upload the Let's Encrypt CA as described in [Adding TLS Secrets](../resources/add-tls-secrets.md). More information can be found on this setting in [Agent TLS Enforcement](../installation-references/tls-settings.md#agent-tls-enforcement).
+- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. If `agent-tls-mode` is set to `strict`, the CA must be uploaded, so that downstream clusters can successfully connect. More information can be found on this setting in [Agent TLS Enforcement](../installation-references/tls-settings.md#agent-tls-enforcement).
| Configuration | Helm Chart Option | Requires cert-manager |
@@ -148,7 +148,7 @@ To see options on how to customize the cert-manager install (including for cases
:::
```
-# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart:
+# If you have installed the CRDs manually, instead of setting `installCRDs` or `crds.enabled` to `true` in your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart:
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download//cert-manager.crds.yaml
# Add the Jetstack Helm repository
@@ -161,7 +161,7 @@ helm repo update
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
- --set installCRDs=true
+ --set crds.enabled=true
```
Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods:
@@ -242,6 +242,12 @@ In the following command,
- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc.
- For Kubernetes v1.25 or later, set `global.cattle.psp.enabled` to `false` when using Rancher v2.7.2-v2.7.4. This is not necessary for Rancher v2.7.5 and above, but you can still manually set the option if you choose.
+:::warning
+
+When `agent-tls-mode` is set to `strict` (the default value for new installs of Rancher starting from v2.9.0), you must supply the `privateCA=true` chart value (e.g., through `--set privateCA=true`) and upload the Let's Encrypt Certificate Authority as outlined in [Adding TLS Secrets](../resources/add-tls-secrets.md). Information on identifying the Let's Encrypt Root CA can be found in the Let's Encrypt [docs](https://letsencrypt.org/certificates/). If you don't upload the CA, then Rancher may fail to connect to new or existing downstream clusters.
+
+:::
+
```
helm install rancher rancher-/rancher \
--namespace cattle-system \
diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
index fe5deede281..e5334e94708 100644
--- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
+++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
@@ -49,7 +49,7 @@ See the [rancher/rancher-cleanup repo](https://github.com/rancher/rancher-cleanu
### Step 2: Restore the Backup and Bring Up Rancher
At this point, there should be no Rancher-related resources on the upstream cluster. Therefore, the next step will be the same as if you were migrating Rancher to a new cluster that contains no Rancher resources.
-/home/btat/rancher-docs/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
+
Follow these [instructions](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) to install the Rancher-Backup Helm chart and restore Rancher to its previous state.
Please keep in mind that:
1. Step 3 can be skipped, because the Cert-Manager app should still exist on the upstream (local) cluster if it was installed before.
diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
index 9ebbf27460f..4fa1f09c8f6 100644
--- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
+++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
@@ -190,3 +190,19 @@ If you want to use encrypted private keys, you should use `ssh-agent` to load yo
### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
The node is not reachable on the configured `address` and `port`.
+
+### Agent reports TLS errors
+
+When using Rancher, you may encounter error messages from the `fleet-agent`, `system-agent`, or `cluster-agent`, such as the message below:
+```
+tls: failed to verify certificate: x509: failed to load system roots and no roots provided; readdirent /dev/null: not a directory
+```
+
+This occurs when Rancher was configured with `agent-tls-mode` set to `strict`, but couldn't find a CA certificate in the `cacerts` setting. To resolve the issue, set the `agent-tls-mode` to `system-store`, or upload the CA for Rancher as described in [Adding TLS Secrets](../resources/add-tls-secrets.md).
+
+### New Cluster Deployment is stuck in "Waiting for Agent to check in"
+
+When Rancher has `agent-tls-mode` set to `strict`, new clusters may fail to provision and report a generic "Waiting for Agent to check in" error message. The root cause of this is similar to the above case of TLS errors - Rancher's agent can't determine which CA Rancher is using (or can't verify that Rancher's cert is actually signed by the specified certificate authority).
+
+To resolve the issue, set the `agent-tls-mode` to `system-store` or upload the CA for Rancher as described in [Adding TLS Secrets](../resources/add-tls-secrets.md).
+
diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md
index 6a5107aea05..e0db870bdc3 100644
--- a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md
+++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md
@@ -49,7 +49,6 @@ For [air-gapped installs only,](../other-installation-methods/air-gapped-helm-cl
Follow the steps to upgrade Rancher server:
-
### 1. Back up Your Kubernetes Cluster that is Running Rancher Server
Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher.
@@ -119,7 +118,6 @@ If you are installing Rancher in an air-gapped environment, skip the rest of thi
:::
-
Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed.
```
diff --git a/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md b/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md
index 9661d616eba..8a5d5cff106 100644
--- a/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md
+++ b/docs/getting-started/installation-and-upgrade/installation-references/feature-flags.md
@@ -26,18 +26,24 @@ The following is a list of feature flags available in Rancher. If you've upgrade
- `multi-cluster-management`: Allows multi-cluster provisioning and management of Kubernetes clusters. This flag can only be set at install time. It can't be enabled or disabled later.
- `rke1-custom-node-cleanup`: Enables cleanup of deleted RKE1 custom nodes. We recommend that you keep this flag enabled, to prevent removed nodes from attempting to rejoin the cluster.
- `rke2`: Enables provisioning RKE2 clusters. This flag is enabled by default.
-- `token-hashing`: Enables token hashing. Once enabled, existing tokens will be hashed and all new tokens will be hashed automatically with the SHA256 algorithm. Once a token is hashed it can't be undone. This flag can't be disabled after its enabled. See [API Tokens](../../../reference-guides/about-the-api/api-tokens.md#token-hashing) for more information.
+- `token-hashing`: Enables token hashing. Once enabled, existing tokens will be hashed and all new tokens will be hashed automatically with the SHA256 algorithm. Once a token is hashed it can't be undone. This flag can't be disabled after it's enabled. See [API Tokens](../../../api/api-tokens.md#token-hashing) for more information.
+- `uiextension`: Enables UI extensions. This flag is enabled by default. Enabling or disabling the flag forces the Rancher pod to restart. The first time this flag is set to `true`, it creates a CRD and enables the controllers and endpoints necessary for the feature to work. If set to `false`, it disables the previously mentioned controllers and endpoints. Setting `uiextension` to `false` has no effect on the CRD -- it does not create a CRD if it does not yet exist, nor does it delete the CRD if it already exists.
- `unsupported-storage-drivers`: Enables types for storage providers and provisioners that aren't enabled by default. See [Allow Unsupported Storage Drivers](../../../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md) for more information.
+- `ui-sql-cache`: Enables a SQLite-based cache for UI tables. See [UI Server-Side Pagination](../../../how-to-guides/advanced-user-guides/enable-experimental-features/ui-server-side-pagination.md) for more information.
+
The following table shows the availability and default values for some feature flags in Rancher. Features marked "GA" are generally available:
-| Feature Flag Name | Default Value | Status | Available As Of |
-| ----------------------------- | ------------- | ------------ | --------------- |
-| `continuous-delivery` | `true` | GA | v2.6.0 |
-| `fleet` | `true` | Can no longer be disabled | v2.6.0 |
-| `fleet` | `true` | GA | v2.5.0 |
-| `harvester` | `true` | Experimental | v2.6.1 |
-| `legacy` | `false` for new installs, `true` for upgrades | GA | v2.6.0 |
-| `rke1-custom-node-cleanup`| `true` | GA | v2.6.0 |
-| `rke2` | `true` | Experimental | v2.6.0 |
-| `token-hashing` | `false` for new installs, `true` for upgrades | GA | v2.6.0 |
+| Feature Flag Name | Default Value | Status | Available As Of | Additional Information |
+| ----------------------------- | ------------- | ------------ | --------------- | ---------------------- |
+| `continuous-delivery` | `true` | GA | v2.6.0 | |
+| `external-rules` | v2.7.14: `false`, v2.8.5: `true` | Removed | v2.7.14, v2.8.5 | This flag affected [external `RoleTemplate` behavior](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#external-roletemplate-behavior). It is removed in Rancher v2.9.0 and later as the behavior is enabled by default. |
+| `fleet` | `true` | Can no longer be disabled | v2.6.0 | |
+| `fleet` | `true` | GA | v2.5.0 | |
+| `harvester` | `true` | Experimental | v2.6.1 | |
+| `legacy` | `false` for new installs, `true` for upgrades | GA | v2.6.0 | |
+| `rke1-custom-node-cleanup`| `true` | GA | v2.6.0 | |
+| `rke2` | `true` | Experimental | v2.6.0 | |
+| `token-hashing` | `false` for new installs, `true` for upgrades | GA | v2.6.0 | |
+| `uiextension` | `true` | GA | v2.9.0 | |
+| `ui-sql-cache` | `false` | Highly experimental | v2.9.0 | |
\ No newline at end of file
diff --git a/docs/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md b/docs/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md
index 35ebfaa9082..d84c4e70792 100644
--- a/docs/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md
+++ b/docs/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md
@@ -32,6 +32,7 @@ For information on enabling experimental features, refer to [this page.](../../.
| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) |
| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" (upstream) Rancher server cluster. _Note: This option is no longer available in v2.5.0. Consider using the `restrictedAdmin` option to prevent users from modifying the local cluster._ |
+| `agentTLSMode` | "" | `string` - either `system-store` or `strict`. See [Agent TLS Enforcement](./tls-settings.md#agent-tls-enforcement) |
| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" |
| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" |
| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) |
@@ -206,7 +207,7 @@ You may terminate the SSL/TLS on a L7 load balancer external to the Rancher clus
:::note
-If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](../../../getting-started/installation-and-upgrade/resources/add-tls-secrets.md) to add the CA cert for Rancher.
+If you are using a Private CA signed certificate (or if `agent-tls-mode` is set to `strict`), add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](../../../getting-started/installation-and-upgrade/resources/add-tls-secrets.md) to add the CA cert for Rancher.
:::
diff --git a/docs/getting-started/installation-and-upgrade/installation-references/tls-settings.md b/docs/getting-started/installation-and-upgrade/installation-references/tls-settings.md
index 321cb524dbf..bbde2c61560 100644
--- a/docs/getting-started/installation-and-upgrade/installation-references/tls-settings.md
+++ b/docs/getting-started/installation-and-upgrade/installation-references/tls-settings.md
@@ -23,3 +23,82 @@ The default TLS configuration only accepts TLS 1.2 and secure TLS cipher suites.
|-----|-----|-----|-----|
| `CATTLE_TLS_MIN_VERSION` | Minimum TLS version | `1.2` | `1.0`, `1.1`, `1.2`, `1.3` |
| `CATTLE_TLS_CIPHERS` | Allowed TLS cipher suites | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`, `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`, `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | See [Golang tls constants](https://golang.org/pkg/crypto/tls/#pkg-constants) |
+
+## Agent TLS Enforcement
+
+The `agent-tls-mode` setting controls how Rancher's agents (`cluster-agent`, `fleet-agent`, and `system-agent`) validate Rancher's certificate.
+
+When the value is set to `strict`, Rancher's agents only trust certificates generated by the Certificate Authority contained in the `cacerts` setting.
+When the value is set to `system-store`, Rancher's agents trust any certificate generated by a public Certificate Authority contained in the operating system's trust store, including those signed by authorities such as Let's Encrypt. This can be a security risk, since any certificate generated by these external authorities, which are outside the user's control, is considered valid in this state.
+
+While the `strict` option enables a higher level of security, it requires Rancher to have access to the CA which generated the certificate visible to the agents. In the case of certain certificate configurations (notably, external certificates), this is not automatic, and extra configuration is needed. See the [installation guide](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md#3-choose-your-ssl-configuration) for more information on which scenarios require extra configuration.
+
+In Rancher v2.9.0 and later, this setting defaults to `strict` on new installs. For users installing or upgrading from a prior Rancher version, it is set to `system-store`.
+
+### Preparing for the Setting Change
+
+Each cluster contains a condition in the status field called `AgentTlsStrictCheck`. If `AgentTlsStrictCheck` is set to `"True"`, this indicates that the agents for the cluster are ready to operate in `strict` mode. You can manually inspect each cluster to see if they are ready using the Rancher UI or a kubectl command such as the following:
+
+```bash
+## the below command outputs $CLUSTER_NAME,$STATUS for all clusters, skipping the local cluster
+kubectl get cluster.management.cattle.io -o jsonpath='{range .items[?(@.metadata.name!="local")]}{.metadata.name},{.status.conditions[?(@.type=="AgentTlsStrictCheck")].status}{"\n"}{end}'
+```
+
+### Changing the Setting
+
+You can change the setting using the Rancher UI or the `agentTLSMode` [helm chart option](./helm-chart-options.md).
+
+:::note
+
+If you specify the value through the Helm chart, you may only modify the value with Helm.
+
+:::
+
+:::warning
+
+Depending on your cert setup, additional action may be required, such as uploading the Certificate Authority which signed your certs. Review the [installation guide](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md#3-choose-your-ssl-configuration) before changing the setting to see if any additional requirements apply to your setup.
+
+:::
+
+To change the setting's value through the UI, navigate to the **Global Settings** page, and find the `agent-tls-mode` setting near the bottom of the page. When you change the setting through the UI, Rancher first checks that all downstream clusters have the condition `AgentTlsStrictCheck` set to `"True"` before allowing the request. This prevents outages from a certificate mismatch.
+
+
+#### Overriding the Setting Validation Checks
+
+In some cases, you may want to override the check ensuring all agents can accept the new TLS configuration:
+
+:::warning
+
+Rancher checks the status of all downstream clusters to prevent outages. Overriding this check is not recommended, and should be done with great caution.
+
+:::
+
+1. As an admin, generate a kubeconfig for the local cluster. In the below examples, this was saved to the `local_kubeconfig.yaml` file.
+2. Retrieve the current setting and save it to `setting.yaml`:
+```bash
+kubectl get setting agent-tls-mode -o yaml --kubeconfig=local_kubeconfig.yaml > setting.yaml
+```
+3. Update the `setting.yaml` file, replacing `value` with `strict`. Adding the `cattle.io/force: "true"` annotation overrides the cluster condition check, and should only be done with great care:
+
+:::warning
+
+Including the `cattle.io/force` annotation with any value (including, for example, `"false"`) overrides the cluster condition check.
+
+:::
+
+```yaml
+apiVersion: management.cattle.io/v3
+customized: false
+default: strict
+kind: Setting
+metadata:
+ name: agent-tls-mode
+ annotations:
+ cattle.io/force: "true"
+source: ""
+value: strict
+```
+4. Apply the new version of the setting:
+```bash
+kubectl apply -f setting.yaml --kubeconfig=local_kubeconfig.yaml
+```
diff --git a/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
index 211141cb704..1578165e292 100644
--- a/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
+++ b/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
@@ -22,7 +22,7 @@ Starting with version 1.24, the above defaults to true.
For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward.
-### FAQ
+## FAQ
@@ -46,6 +46,6 @@ A: You can use a runtime like containerd with Kubernetes that does not require D
Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options?
-A: Today, you can stand up a new cluster and migrate workloads to a new RKE2 cluster that uses containerd. Rancher is exploring the possibility of an in-place upgrade path.
+A: Today, you can stand up a new cluster and migrate workloads to a new RKE2 cluster that uses containerd. For details, see the [RKE to RKE2 Replatforming Guide](https://links.imagerelay.com/cdn/3404/ql/5606a3da2365422ab2250d348aa07112/rke_to_rke2_replatforming_guide.pdf).
diff --git a/docs/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md b/docs/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
index 9bbc10f9326..878db7c657b 100644
--- a/docs/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
+++ b/docs/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
@@ -216,6 +216,14 @@ Each node used should have a static IP configured, regardless of whether you are
To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements](port-requirements.md) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types.
+### Load Balancer Requirements
+
+If you use a load balancer, it should be HTTP/2 compatible.
+
+To receive help from SUSE Support, Rancher Prime customers who use load balancers (or any other middleboxes such as firewalls) must use one that is HTTP/2 compatible.
+
+When HTTP/2 is not available, Rancher falls back to HTTP/1.1. However, since HTTP/2 offers improved web application performance, using HTTP/1.1 can create performance issues.
+
## Dockershim Support
For more information on Dockershim support, refer to [this page](dockershim.md).
diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
index 53bbdc4e9cc..e307e805b9e 100644
--- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
+++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
@@ -28,7 +28,7 @@ For security purposes, SSL (Secure Sockets Layer) is required when using Rancher
Choose from the following options:
-### Option A: Default Self-Signed Certificate
+## Option A: Default Self-Signed Certificate
Click to expand
@@ -55,7 +55,7 @@ docker run -d --restart=unless-stopped \
-### Option B: Bring Your Own Certificate: Self-Signed
+## Option B: Bring Your Own Certificate: Self-Signed
Click to expand
@@ -98,7 +98,7 @@ docker run -d --restart=unless-stopped \
-### Option C: Bring Your Own Certificate: Signed by Recognized CA
+## Option C: Bring Your Own Certificate: Signed by Recognized CA
Click to expand
@@ -143,8 +143,6 @@ docker run -d --restart=unless-stopped \
-
-
:::note
If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login.
diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
index 07b6b01097f..40e21d3186a 100644
--- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
+++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
@@ -25,7 +25,7 @@ We recommend setting up the following infrastructure for a high-availability ins
- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it.
- **A private image registry** to distribute container images to your machines.
-### 1. Set up Linux Nodes
+## 1. Set up Linux Nodes
These hosts will be disconnected from the internet, but require being able to connect with your private registry.
@@ -33,7 +33,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS,
For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2.
-### 2. Set up External Datastore
+## 2. Set up External Datastore
The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case.
@@ -49,7 +49,7 @@ For an example of one way to set up the database, refer to this [tutorial](../..
For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/)
-### 3. Set up the Load Balancer
+## 3. Set up the Load Balancer
You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server.
@@ -72,7 +72,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance
:::
-### 4. Set up the DNS Record
+## 4. Set up the DNS Record
Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer.
@@ -82,7 +82,7 @@ You will need to specify this hostname in a later step when you install Rancher,
For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)
-### 5. Set up a Private Image Registry
+## 5. Set up a Private Image Registry
Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing container images to your machines.
@@ -106,13 +106,13 @@ To install the Rancher management server on a high-availability RKE cluster, we
These nodes must be in the same region/data center. You may place these servers in separate availability zones.
-### Why three nodes?
+## Why Three Nodes?
In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes.
The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes.
-### 1. Set up Linux Nodes
+## 1. Set up Linux Nodes
These hosts will be disconnected from the internet, but require being able to connect with your private registry.
@@ -120,7 +120,7 @@ Make sure that your nodes fulfill the general installation requirements for [OS,
For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2.
-### 2. Set up the Load Balancer
+## 2. Set up the Load Balancer
You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server.
@@ -143,7 +143,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance
:::
-### 3. Set up the DNS Record
+## 3. Set up the DNS Record
Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer.
@@ -153,7 +153,7 @@ You will need to specify this hostname in a later step when you install Rancher,
For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)
-### 4. Set up a Private Image Registry
+## 4. Set up a Private Image Registry
Rancher supports air gap installs using a secure private registry. You must have your own private registry or other means of distributing container images to your machines.
@@ -176,7 +176,7 @@ If you need to create a private registry, refer to the documentation pages for y
:::
-### 1. Set up a Linux Node
+## 1. Set up a Linux Node
This host will be disconnected from the Internet, but needs to be able to connect to your private registry.
@@ -184,7 +184,7 @@ Make sure that your node fulfills the general installation requirements for [OS,
For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2.
-### 2. Set up a Private Docker Registry
+## 2. Set up a Private Docker Registry
Rancher supports air gap installs using a private registry on your bastion server. You must have your own private registry or other means of distributing container images to your machines.
@@ -193,4 +193,4 @@ If you need help with creating a private registry, please refer to the [official
-### [Next: Collect and Publish Images to your Private Registry](publish-images.md)
+## [Next: Collect and Publish Images to your Private Registry](publish-images.md)
diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md
index 2189e94d095..bef60c2fe4a 100644
--- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md
+++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md
@@ -23,14 +23,15 @@ The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are sh
In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server.
-### Installation Outline
+## Installation Outline
1. [Prepare Images Directory](#1-prepare-images-directory)
2. [Create Registry YAML](#2-create-registry-yaml)
3. [Install K3s](#3-install-k3s)
4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file)
-### 1. Prepare Images Directory
+## 1. Prepare Images Directory
+
Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running.
Place the tar file in the `images` directory before starting K3s on each node, for example:
@@ -40,7 +41,8 @@ sudo mkdir -p /var/lib/rancher/k3s/agent/images/
sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/
```
-### 2. Create Registry YAML
+## 2. Create Registry YAML
+
Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry.
The registries.yaml file should look like this before plugging in the necessary information:
@@ -66,7 +68,7 @@ Note, at this time only secure registries are supported with K3s (SSL with custo
For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/)
-### 3. Install K3s
+## 3. Install K3s
Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [Rancher Support Matrix](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/).
@@ -98,7 +100,7 @@ K3s additionally provides a `--resolv-conf` flag for kubelets, which may help wi
:::
-### 4. Save and Start Using the kubeconfig File
+## 4. Save and Start Using the kubeconfig File
When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location.
@@ -138,7 +140,7 @@ kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces
For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files.
-### Note on Upgrading
+## Note on Upgrading
Upgrading an air-gap environment can be accomplished in the following manner:
@@ -151,14 +153,15 @@ Upgrading an air-gap environment can be accomplished in the following manner:
In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server.
-### Installation Outline
+## Installation Outline
1. [Create RKE2 configuration](#1-create-rke2-configuration)
2. [Create Registry YAML](#2-create-registry-yaml)
3. [Install RKE2](#3-install-rke2)
4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file)
-### 1. Create RKE2 configuration
+## 1. Create RKE2 configuration
+
Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster.
On the first server the minimum config is:
@@ -186,7 +189,8 @@ RKE2 additionally provides a `resolv-conf` option for kubelets, which may help w
:::
-### 2. Create Registry YAML
+## 2. Create Registry YAML
+
Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry.
The registries.yaml file should look like this before plugging in the necessary information:
@@ -210,7 +214,7 @@ configs:
For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration)
-### 3. Install RKE2
+## 3. Install RKE2
Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/)
@@ -239,7 +243,7 @@ systemctl start rke2-server.service
For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap).
-### 4. Save and Start Using the kubeconfig File
+## 4. Save and Start Using the kubeconfig File
When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location.
@@ -279,7 +283,7 @@ kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces
For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files.
-### Note on Upgrading
+## Note on Upgrading
Upgrading an air-gap environment can be accomplished in the following manner:
@@ -291,7 +295,7 @@ Upgrading an air-gap environment can be accomplished in the following manner:
We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file.
-### 1. Install RKE
+## 1. Install RKE
Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/)
@@ -301,7 +305,7 @@ Certified version(s) of RKE based on the Rancher version can be found in the [Ra
:::
-### 2. Create an RKE Config File
+## 2. Create an RKE Config File
From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`.
@@ -352,7 +356,7 @@ private_registries:
is_default: true
```
-### 3. Run RKE
+## 3. Run RKE
After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster:
@@ -360,7 +364,7 @@ After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster:
rke up --config ./rancher-cluster.yml
```
-### 4. Save Your Files
+## 4. Save Your Files
:::note Important:
@@ -383,8 +387,8 @@ The "rancher-cluster" parts of the two latter file names are dependent on how yo
:::
-### Issues or errors?
+## Issues or Errors?
See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page.
-### [Next: Install Rancher](install-rancher-ha.md)
+## [Next: Install Rancher](install-rancher-ha.md)
diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
index 99c4332b633..7aa31190731 100644
--- a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
+++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
@@ -8,7 +8,7 @@ title: 4. Install Rancher
This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy.
-### Privileged Access for Rancher
+## Privileged Access for Rancher
When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option.
@@ -92,7 +92,7 @@ Recent changes to cert-manager require an upgrade. If you are upgrading Rancher
:::
-##### 1. Add the cert-manager repo
+##### 1. Add the cert-manager Repo
From a system connected to the internet, add the cert-manager repo to Helm:
@@ -101,7 +101,7 @@ helm repo add jetstack https://charts.jetstack.io
helm repo update
```
-##### 2. Fetch the cert-manager chart
+##### 2. Fetch the cert-manager Chart
Fetch the latest cert-manager chart available from the [Helm chart repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager).
@@ -109,7 +109,7 @@ Fetch the latest cert-manager chart available from the [Helm chart repository](h
helm fetch jetstack/cert-manager --version v1.11.0
```
-##### 3. Retrieve the Cert-Manager CRDs
+##### 3. Retrieve the cert-manager CRDs
Download the required CRD file for cert-manager:
```plain
@@ -120,7 +120,7 @@ Download the required CRD file for cert-manager:
Copy the fetched charts to a system that has access to the Rancher server cluster to complete installation.
-##### 1. Install Cert-Manager
+#### 1. Install cert-manager
Install cert-manager with the same options you would use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry.
@@ -160,7 +160,8 @@ If you are using self-signed certificates, install cert-manager:
-##### 2. Install Rancher
+#### 2. Install Rancher
+
First, refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them.
Then, create the namespace for Rancher using kubectl:
@@ -192,9 +193,9 @@ Placeholder | Description
**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8`
-#### Option B: Certificates From Files using Kubernetes Secrets
+#### Option B: Certificates From Files Using Kubernetes Secrets
-##### 1. Create secrets
+##### 1. Create Secrets
Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher.
diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md
index 9f3654619d3..7f04e7974b6 100644
--- a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md
+++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md
@@ -27,7 +27,7 @@ First configure the HTTP proxy settings on the K3s systemd service, so that K3s'
```
cat <<'EOF' | sudo tee /etc/default/k3s > /dev/null
HTTP_PROXY=http://${proxy_host}
-HTTPS_PROXY=http://${proxy_host}"
+HTTPS_PROXY=http://${proxy_host}
NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
EOF
```
@@ -71,7 +71,7 @@ Then you have to configure the HTTP proxy settings on the RKE2 systemd service,
```
cat <<'EOF' | sudo tee /etc/default/rke2-server > /dev/null
HTTP_PROXY=http://${proxy_host}
-HTTPS_PROXY=http://${proxy_host}"
+HTTPS_PROXY=http://${proxy_host}
NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
EOF
```
diff --git a/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
index 23f68930049..89a9b2ce977 100644
--- a/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
+++ b/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
@@ -109,7 +109,7 @@ Rancher Server is distributed as a Docker image, which have tags attached to the
| -------------------------- | ------ |
| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. |
| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. |
-| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. |
+| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at Docker Hub. |
:::note
diff --git a/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md b/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md
index a916003a9fa..3a618cedc40 100644
--- a/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md
+++ b/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md
@@ -102,8 +102,6 @@ There is a [known issue](https://github.com/rancher/rancher/issues/25478) in whi
### Maintaining Availability for Applications During Upgrades
-_Available as of RKE v1.1.0_
-
In [this section of the RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster.
### Configuring the Upgrade Strategy in the cluster.yml
diff --git a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
index a474f770b1b..71b3a37204c 100644
--- a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
+++ b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
@@ -36,7 +36,7 @@ Administrators might configure the RKE metadata settings to do the following:
- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub
- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher
-### Refresh Kubernetes Metadata
+## Refresh Kubernetes Metadata
The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)
@@ -74,7 +74,7 @@ If you don't have an air gap setup, you don't need to specify the URL where Ranc
However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file.
-### Air Gap Setups
+## Air Gap Setups
Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/)
diff --git a/docs/glossary.md b/docs/glossary.md
new file mode 100644
index 00000000000..1db06477569
--- /dev/null
+++ b/docs/glossary.md
@@ -0,0 +1,17 @@
+---
+title: Glossary
+---
+
+
+
+
+
+This page covers Rancher-specific terminology and symbols which might be unfamiliar, or which differ between Rancher versions.
+
+```mdx-code-block
+import Glossary, {toc as GlossaryTOC} from "/shared-files/_glossary.md"
+
+
+
+export const toc = GlossaryTOC;
+```
\ No newline at end of file
diff --git a/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
index dc19b0a28ea..f7266be3ff3 100644
--- a/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
+++ b/docs/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
@@ -80,11 +80,11 @@ If you use a certificate signed by a recognized CA, installing your certificate
1. Enter the following command.
- ```
- docker run -d --restart=unless-stopped \
- -p 80:80 -p 443:443 \
- rancher/rancher:latest --no-cacerts
- ```
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ rancher/rancher:latest --no-cacerts
+ ```
diff --git a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md
index c7a0a280078..991d9d9ae3f 100644
--- a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md
+++ b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md
@@ -28,14 +28,14 @@ spec:
rkeConfig:
machineGlobalConfig:
audit-policy-file: |
- apiVersion: audit.k8s.io/v1
- kind: Policy
- rules:
- - level: RequestResponse
- resources:
- - group: ""
- resources:
- - pods
+ apiVersion: audit.k8s.io/v1
+ kind: Policy
+ rules:
+ - level: RequestResponse
+ resources:
+ - group: ""
+ resources:
+ - pods
```
### Method 2: Use the Directives, `machineSelectorFiles` and `machineGlobalConfig`
diff --git a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md
index f0795669975..4f0a15c7984 100644
--- a/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md
+++ b/docs/how-to-guides/advanced-user-guides/enable-api-audit-log.md
@@ -36,12 +36,12 @@ The usage below defines rules about what the audit log should record and what da
The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#api-audit-log-options) setting.
-| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body |
-| --------------------- | ---------------- | ------------ | ----------------- | ------------- |
-| `0` | | | | |
-| `1` | ✓ | | | |
-| `2` | ✓ | ✓ | | |
-| `3` | ✓ | ✓ | ✓ | ✓ |
+| `AUDIT_LEVEL` Setting | Metadata | Request Body | Response Body |
+| --------------------- | -------- | ------------ | ------------- |
+| `0` | | | |
+| `1` | ✓ | | |
+| `2` | ✓ | ✓ | |
+| `3` | ✓ | ✓ | ✓ |
## Viewing API Audit Logs
diff --git a/docs/how-to-guides/advanced-user-guides/enable-experimental-features/ui-server-side-pagination.md b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/ui-server-side-pagination.md
new file mode 100644
index 00000000000..f3faaf47ceb
--- /dev/null
+++ b/docs/how-to-guides/advanced-user-guides/enable-experimental-features/ui-server-side-pagination.md
@@ -0,0 +1,41 @@
+---
+title: UI Server-Side Pagination
+---
+
+
+
+
+
+:::caution
+UI server-side pagination is not intended for use in production at this time. This feature is considered highly experimental. SUSE customers should consult SUSE Support before activating this feature.
+:::
+
+
+UI server-side pagination caching provides an optional SQLite-backed cache of Kubernetes objects to improve performance. This unlocks sorting, filtering and pagination features used by the UI to restrict the amount of resources it fetches and stores in browser memory. These features are primarily used to improve list performance for resources with high counts.
+
+This feature creates file system based caches in the `rancher` pods of the upstream cluster, and in the `cattle-cluster-agent` pods of the downstream clusters. In most environments, disk usage and I/O should not be significant. However, you should monitor activity after you enable caching.
+
+SQLite-backed caching persists copies of any cached Kubernetes objects to disk. See [Encrypting SQLite-backed Caching](#encrypting-sqlite-backed-caches) if this is a security concern.
+
+## Enabling UI Server-Side Pagination
+
+1. In the upper left corner, click **☰ > Global Settings > Feature Flags**.
+1. Find **`ui-sql-cache`** and select **⋮ > Activate > Activate**.
+1. Wait for Rancher to restart. This also restarts agents on all downstream clusters.
+1. In the upper left corner, click **☰ > Global Settings > Performance**.
+1. Go to **Server-side Pagination** and check the **Enable Server-side Pagination** option.
+1. Click **Apply**.
+1. Reload the page with the browser button (or the equivalent keyboard combination, typically `CTRL + R` on Windows and Linux, and `⌘ + R` on macOS).
+
+
+## Encrypting SQLite-backed Caches
+
+UI server-side pagination persists copies of any cached Kubernetes objects to disk. If you're concerned about the safety of this data, you can encrypt all objects before they are persisted to disk, by setting the environment variable `CATTLE_ENCRYPT_CACHE_ALL` to `true` in `rancher` pods in the upstream cluster and `cattle-cluster-agent` pods in the downstream clusters.
+
+Secrets and security tokens are always encrypted regardless of the above setting.
+
+## Known Limitations of UI Server-Side Pagination
+
+This initial release improves the performance of Pods, Secrets, Nodes and ConfigMaps in the Cluster Explorer pages, and most resources in the Explorer's **More Resources** section.
+
+Pages can't be automatically refreshed. You can manually refresh table contents by clicking the **Refresh** button.
diff --git a/docs/how-to-guides/advanced-user-guides/enable-user-retention.md b/docs/how-to-guides/advanced-user-guides/enable-user-retention.md
new file mode 100644
index 00000000000..ca811be2b83
--- /dev/null
+++ b/docs/how-to-guides/advanced-user-guides/enable-user-retention.md
@@ -0,0 +1,62 @@
+---
+title: Enabling User Retention
+---
+
+
+
+
+
+In Rancher v2.8.5 and later, you can enable user retention to automatically disable or delete inactive user accounts after a configurable time period.
+
+The user retention feature is off by default.
+
+## Enabling User Retention with kubectl
+
+To enable user retention, you must set `user-retention-cron`. You must also set at least one of `disable-inactive-user-after` or `delete-inactive-user-after`. You can use `kubectl edit setting <setting-name>` to open your editor of choice and set these values.
+
+## Configuring Rancher to Delete Users, Disable Users, or Combine Operations
+
+Rancher uses two global user retention settings to determine if and when users are disabled or deleted after a certain period of inactivity. Disabled accounts must be re-enabled before users can log in again. If an account is deleted without being disabled, users may be able to log in through external authentication and the deleted account will be recreated.
+
+The global settings, `disable-inactive-user-after` and `delete-inactive-user-after`, do not block one another from running.
+
+For example, you can set both operations to run. If you give `disable-inactive-user-after` a shorter duration than `delete-inactive-user-after`, the user retention process disables inactive accounts before deleting them.
+
+You can also edit some user retention settings on a specific user's `UserAttribute`. Setting these values overrides the global settings. See [User-specific User Retention Overrides](#user-specific-user-retention-overrides) for more details.
+
+### Required User Retention Settings
+
+The following are global settings:
+
+- `user-retention-cron`: Describes how often the user retention process runs. The value is a cron expression (for example, `0 * * * *` for every hour).
+- `disable-inactive-user-after`: The amount of time that a user account can be inactive before the process disables an account. Disabling an account forces the user to request that an administrator re-enable the account before they can log in to use it. Values are expressed in [time.Duration units](https://pkg.go.dev/time#ParseDuration) (for example, `720h` for 720 hours or 30 days). The value must be greater than `auth-user-session-ttl-minutes`, which is `16h` by default. If the value is not set, set to the empty string, or is equal to 0, the process does not disable any inactive accounts.
+- `delete-inactive-user-after`: The amount of time that a user account can be inactive before the process deletes the account. Values are expressed in time.Duration units (for example, `720h` for 720 hours or 30 days). The value must be greater than `auth-user-session-ttl-minutes`, which is `16h` by default. The value should be greater than `336h` (14 days), otherwise it is rejected by the Rancher webhook. If you need the value to be lower than 14 days, you can [bypass the webhook](../../reference-guides/rancher-webhook.md#bypassing-the-webhook). If the value is not set, set to the empty string, or is equal to 0, the process does not delete any inactive accounts.
+
+### Optional User Retention Settings
+
+The following are global settings:
+
+- `user-retention-dry-run`: If set to `true`, the user retention process runs without actually deleting or disabling any user accounts. This can help test user retention behavior before allowing the process to disable or delete user accounts in a production environment.
+- `user-last-login-default`: If a user does not have `UserAttribute.LastLogin` set on their account, this setting is used instead. The value is expressed as an [RFC 3339 date-time](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) truncated to the last second; for example, `2023-03-01T00:00:00Z`. If the value is set to the empty string or is equal to 0, this setting is not used.
+
+#### User-specific User Retention Overrides
+
+The following are user-specific overrides to the global settings for special cases. These settings are applied by editing the `UserAttribute` associated with a given account:
+
+```
+kubectl edit userattribute <username>
+```
+
+- `disableAfter`: The user-specific override for `disable-inactive-user-after`. The value is expressed in [time.Duration units](https://pkg.go.dev/time#ParseDuration) and truncated to the second. If the value is set to `0s` then the account won't be subject to disabling.
+- `deleteAfter`: The user-specific override for `delete-inactive-user-after`. The value is expressed in [time.Duration units](https://pkg.go.dev/time#ParseDuration) and truncated to the second. If the value is set to `0s` then the account won't be subject to deletion.
+
+## Viewing User Retention Settings in the Rancher UI
+
+You can see which user retention settings are applied to which users.
+
+1. In the upper left corner, click **☰ > Users & Authentication**.
+1. In the left navigation menu, select **Users**.
+
+The **Disable After** and **Delete After** columns for each user account indicate how long the account can be inactive before it is disabled or deleted from Rancher. There is also a **Last Login** column roughly indicating when the account was last active.
+
+The same information is available if you click a user's name in the **Users** table and select the **Detail** tab.
diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md
index e965bc9a8b1..47a0cc91f10 100644
--- a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md
+++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md
@@ -6,19 +6,21 @@ title: Generate and View Traffic from Istio
-This section describes how to view the traffic that is being managed by Istio.
-
## The Kiali Traffic Graph
-The Istio overview page provides a link to the Kiali dashboard. From the Kiali dashboard, you are able to view graphs for each namespace. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other.
+The Istio overview page provides a link to the Kiali dashboard. From the Kiali dashboard, you can view graphs for each namespace. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other.
-:::note Prerequisites:
+## Prerequisites
-To enable traffic to show up in the graph, ensure you have prometheus installed in the cluster. Rancher-istio installs Kiali configured by default to work with the rancher-monitoring chart. You can use rancher-monitoring or install your own monitoring solution. Optional: you can change configuration on how data scraping occurs by setting the [Selectors & Scrape Configs](../../../integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) options.
+To enable traffic to show up in the graph, ensure that you have Prometheus installed in the cluster. `rancher-istio` installs Kiali, and configures it by default to work with the `rancher-monitoring` chart. You can use `rancher-monitoring` or install your own monitoring solution.
-:::
+Additionally, for Istio installations version `103.1.0+up1.19.6` and later, Kiali uses a token value for its authentication strategy. If you are trying to generate or retrieve the token (e.g. for login), note that the name of the Kiali service account in Rancher is `kiali`. For more information, refer to the [Kiali token authentication FAQ](https://kiali.io/docs/faq/authentication/).
-To see the traffic graph,
+Optional: You can configure which namespaces data scraping occurs in by setting the Helm chart options described in [Selectors & Scrape Configs](../../../integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md).
+
+## Traffic Visualization
+
+To see the traffic graph, follow the steps below:
1. In the cluster where Istio is installed, click **Istio** in the left navigation bar.
1. Click the **Kiali** link.
diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md
index 2e4ab117ca0..6d4b13b4ad4 100644
--- a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md
+++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md
@@ -42,7 +42,7 @@ For more information about the default limits, see [this page.](../../../referen
### Enable Monitoring for use without SSL
-1. Click **☰ > Cluster Management**.
+1. Click **☰ > Cluster Management**.
1. Go to the cluster that you created and click **Explore**.
1. Click **Cluster Tools** (bottom left corner).
1. Click **Install** by Monitoring.
@@ -77,3 +77,79 @@ key.pfx=`base64-content`
```
Then **Cert File Path** would be set to `/etc/alertmanager/secrets/cert.pem`.
+
+## Rancher Performance Dashboard
+
+When monitoring is installed on the upstream (local) cluster, you are given basic health metrics about the Rancher pods, such as CPU and memory data. To get advanced metrics for your local Rancher server, you must additionally enable the Rancher Performance Dashboard for Grafana.
+
+This dashboard provides access to the following advanced metrics:
+
+- Handler Average Execution Times Over Last 5 Minutes
+- Rancher API Average Request Times Over Last 5 Minutes
+- Subscribe Average Request Times Over Last 5 Minutes
+- Lasso Controller Work Queue Depth (Top 20)
+- Number of Rancher Requests (Top 20)
+- Number of Failed Rancher API Requests (Top 20)
+- K8s Proxy Store Average Request Times Over Last 5 Minutes (Top 20)
+- K8s Proxy Client Average Request Times Over Last 5 Minutes (Top 20)
+- Cached Objects by GroupVersionKind (Top 20)
+- Lasso Handler Executions (Top 20)
+- Handler Executions Over Last 2 Minutes (Top 20)
+- Total Handler Executions with Error (Top 20)
+- Data Transmitted by Remote Dialer Sessions (Top 20)
+- Errors for Remote Dialer Sessions (Top 20)
+- Remote Dialer Connections Removed (Top 20)
+- Remote Dialer Connections Added by Client (Top 20)
+
+:::note
+
+Profiling data (such as advanced memory or CPU analysis) is not present as it is a very context-dependent technique that's meant for debugging and not intended for normal observation.
+
+:::
+
+### Enabling the Rancher Performance Dashboard
+
+To enable the Rancher Performance Dashboard:
+
+
+
+
+Use the following options with the Helm CLI:
+
+```bash
+--set extraEnv\[0\].name="CATTLE_PROMETHEUS_METRICS" --set-string extraEnv\[0\].value=true
+```
+
+You can also include the following snippet in your Rancher Helm chart's `values.yaml` file:
+
+```yaml
+extraEnv:
+ - name: "CATTLE_PROMETHEUS_METRICS"
+ value: "true"
+```
+
+
+
+
+1. Click **☰ > Cluster Management**.
+1. Go to the row of the `local` cluster and click **Explore**.
+1. Click **Workloads > Deployments**.
+1. Use the dropdown menu at the top to filter for **All Namespaces**.
+1. Under the `cattle-system` namespace, go to the `rancher` row and click **⋮ > Edit Config**.
+1. Under **Environment Variables**, click **Add Variable**.
+1. For **Type**, select `Key/Value Pair`.
+1. For **Variable Name**, enter `CATTLE_PROMETHEUS_METRICS`.
+1. For **Value**, enter `true`.
+1. Click **Save** to apply the change.
+
+
+
+
+### Accessing the Rancher Performance Dashboard
+
+1. Click **☰ > Cluster Management**.
+1. Go to the row of the `local` cluster and click **Explore**.
+1. Click **Monitoring**.
+1. Select the **Grafana** dashboard.
+1. From the sidebar, click **Search dashboards**.
+1. Enter `Rancher Performance Debugging` and select it.
diff --git a/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
index 2369abe3948..b4554167411 100644
--- a/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
+++ b/docs/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
@@ -6,7 +6,17 @@ title: Opening Ports with firewalld
-> We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off.
+:::danger
+
+Enabling firewalld can cause serious network communication problems.
+
+For proper network function, firewalld must be disabled on systems running RKE2. [Firewalld conflicts with Canal](https://docs.rke2.io/known_issues#firewalld-conflicts-with-default-networking), RKE2's default networking stack.
+
+Firewalld must also be disabled on systems running Kubernetes 1.19 and later.
+
+If you enable firewalld on systems running Kubernetes 1.18 or earlier, understand that this may cause networking issues. CNIs in Kubernetes dynamically update iptables and networking rules independently of any external firewalls, such as firewalld. This can cause unexpected behavior when the CNI and the external firewall conflict.
+
+:::
Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm.
diff --git a/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
index 59757908a7b..27389737ab6 100644
--- a/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
+++ b/docs/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
@@ -8,9 +8,9 @@ title: Tuning etcd for Large Installations
When Rancher is used to manage [a large infrastructure](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md) it is recommended to increase the default keyspace for etcd from the default 2 GB. The maximum setting is 8 GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval.
-The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers.
+The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. If you see `mvcc: database space exceeded` errors, in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) setting on the etcd servers.
-### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB
+## Example: This Snippet of the RKE Cluster.yml File Increases the Keyspace Size to 5GB
```yaml
# RKE cluster.yml
@@ -21,9 +21,9 @@ services:
quota-backend-bytes: 5368709120
```
-## Scaling etcd disk performance
+## Scaling etcd Disk Performance
-You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.4.0/tuning/#disk) on how to tune the disk priority on the host.
+You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.5/tuning/#disk) on how to tune the disk priority on the host.
Additionally, to reduce IO contention on the disks for etcd, you can use a dedicated device for the data and wal directory. Based on etcd best practices, mirroring RAID configurations are unnecessary because etcd replicates data between the nodes in the cluster. You can use striping RAID configurations to increase available IOPS.
diff --git a/docs/how-to-guides/new-user-guides/add-users-to-projects.md b/docs/how-to-guides/new-user-guides/add-users-to-projects.md
index d99e7c18120..d3beb2fb0b5 100644
--- a/docs/how-to-guides/new-user-guides/add-users-to-projects.md
+++ b/docs/how-to-guides/new-user-guides/add-users-to-projects.md
@@ -16,11 +16,11 @@ Want to provide a user with access to _all_ projects within a cluster? See [Addi
:::
-### Adding Members to a New Project
+## Adding Members to a New Project
You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)
-### Adding Members to an Existing Project
+## Adding Members to an Existing Project
Following project creation, you can add users as project members so that they can access its resources.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
index a1539a663a7..747977260c4 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
@@ -56,4 +56,6 @@ If you want to use a node driver that Rancher doesn't support out-of-the-box, yo
### Developing Your Own Node Drivers
-Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/).
+Node drivers are implemented with [Rancher Machine](https://github.com/rancher/machine), a fork of [Docker Machine](https://github.com/docker/machine). Docker Machine is no longer under active development.
+
+Refer to the original [Docker Machine documentation](https://github.com/docker/docs/blob/vnext-engine/machine/overview.md) for details on how to develop your own node drivers.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
index 7f95ca305be..365c879aee4 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
@@ -60,4 +60,4 @@ To convert an existing cluster to use an RKE template,
- A new RKE template is created.
- The cluster is converted to use the new template.
-- New clusters can be [created from the new template.](apply-templates.md#creating-a-cluster-from-an-rke-template)
\ No newline at end of file
+- New clusters can be [created from the new template.](#creating-a-cluster-from-an-rke-template)
\ No newline at end of file
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
index b923d862b5c..4556a3dfa5a 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
@@ -21,20 +21,21 @@ The account used to enable the external provider will be granted admin permissio
The Rancher authentication proxy integrates with the following external authentication services.
-| Auth Service |
-| ------------------------------------------------------------------------------------------------ |
-| [Microsoft Active Directory](configure-active-directory.md) |
-| [GitHub](configure-github.md) |
-| [Microsoft Azure AD](configure-azure-ad.md) |
-| [FreeIPA](configure-freeipa.md) |
-| [OpenLDAP](../configure-openldap/configure-openldap.md) |
+| Auth Service |
+|------------------------------------------------------------------------------------------------------------------------|
+| [Microsoft Active Directory](configure-active-directory.md) |
+| [GitHub](configure-github.md) |
+| [Microsoft Azure AD](configure-azure-ad.md) |
+| [FreeIPA](configure-freeipa.md) |
+| [OpenLDAP](../configure-openldap/configure-openldap.md) |
| [Microsoft AD FS](../configure-microsoft-ad-federation-service-saml/configure-microsoft-ad-federation-service-saml.md) |
-| [PingIdentity](configure-pingidentity.md) |
-| [Keycloak (OIDC)](configure-keycloak-oidc.md) |
-| [Keycloak (SAML)](configure-keycloak-saml.md) |
-| [Okta](configure-okta-saml.md) |
-| [Google OAuth](configure-google-oauth.md) |
-| [Shibboleth](../configure-shibboleth-saml/configure-shibboleth-saml.md) |
+| [PingIdentity](configure-pingidentity.md) |
+| [Keycloak (OIDC)](configure-keycloak-oidc.md) |
+| [Keycloak (SAML)](configure-keycloak-saml.md) |
+| [Okta](configure-okta-saml.md) |
+| [Google OAuth](configure-google-oauth.md) |
+| [Shibboleth](../configure-shibboleth-saml/configure-shibboleth-saml.md) |
+| [Generic (OIDC)](configure-generic-oidc.md) |
However, Rancher also provides [local authentication](create-local-users.md).
@@ -62,6 +63,12 @@ After you configure Rancher to allow sign on using an external authentication se
| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. |
| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. |
+:::warning
+
+Only trusted admin-level users should have access to the local cluster, which manages all of the other clusters in a Rancher instance. Rancher is directly installed on the local cluster, and Rancher's management features allow admins on the local cluster to provision, modify, connect to, and view details about downstream clusters. Since the local cluster is key to a Rancher instance's architecture, inappropriate access carries security risks.
+
+:::
+
To set the Rancher access level for users in the authorization service, follow these steps:
1. In the upper left corner, click **☰ > Users & Authentication**.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
index 817a490ecda..b17daeabbcd 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
@@ -133,7 +133,17 @@ Here are a few examples of permission combinations that satisfy Rancher's needs:
:::
-#### 4. Copy Azure Application Data
+#### 4. Allow Public Client Flows
+
+To log in from the Rancher CLI, you must allow public client flows:
+
+1. From the left navigation menu, select **Authentication**.
+
+1. Under **Advanced Settings**, select **Yes** on the toggle next to **Allow public client flows**.
+
+ 
+
+#### 5. Copy Azure Application Data

@@ -167,7 +177,7 @@ Custom Endpoints are not tested or fully supported by Rancher.
You'll also need to manually enter the Graph, Token, and Auth Endpoints.
-- From App registrations , click Endpoints :
+- From **App registrations**, click **Endpoints**:

@@ -176,7 +186,7 @@ You'll also need to manually enter the Graph, Token, and Auth Endpoints.
- **OAuth 2.0 token endpoint (v1)** (Token Endpoint)
- **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint)
-#### 5. Configure Azure AD in Rancher
+#### 6. Configure Azure AD in Rancher
To complete configuration, enter information about your AD instance in the Rancher UI.
@@ -188,7 +198,7 @@ To complete configuration, enter information about your AD instance in the Ranch
1. Click **AzureAD**.
-1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#4-copy-azure-application-data).
+1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data).
:::caution
@@ -221,6 +231,8 @@ To complete configuration, enter information about your AD instance in the Ranch
https://g raph.microsoft.com/abb5adde-bee8-4821-8b03-e63efdc7701c
+1. (Optional) In Rancher v2.9.0 and later, you can filter users' group memberships in Azure AD to reduce the amount of log data generated. See steps 4–5 of [Filtering Users by Azure AD Auth Group Memberships](#filtering-users-by-azure-ad-auth-group-memberships) for full instructions.
+
1. Click **Enable**.
**Result:** Azure Active Directory authentication is configured.
@@ -314,6 +326,29 @@ Endpoint | https://login.partner.microsoftonline.cn/
Graph Endpoint | https://microsoftgraph.chinacloudapi.cn
Token Endpoint | https://login.partner.microsoftonline.cn/{tenantID}/oauth2/v2.0/token
+## Filtering Users by Azure AD Auth Group Memberships
+
+In Rancher v2.9.0 and later, you can filter users' group memberships from Azure AD to reduce the amount of log data generated. If you did not filter group memberships during initial setup, you can still add filters on an existing Azure AD configuration.
+
+:::warning
+
+Filtering out a user group membership affects more than just logging.
+
+Since the filter prevents Rancher from seeing that a user belongs to an excluded group, Rancher also does not see any permissions granted through that group. This means that filtering out a group can have the side effect of denying users permissions they should have.
+
+:::
+
+1. In Rancher, in the top left corner, click **☰ > Users & Authentication**.
+
+1. In the left navigation menu, click **Auth Provider**.
+
+1. Click **AzureAD**.
+
+1. Click the checkbox next to **Limit users by group membership**.
+
+1. Enter an [OData filter clause](https://learn.microsoft.com/en-us/odata/concepts/queryoptions-overview#filter) into the **Group Membership Filter** field. For example, if you want to limit logging to group memberships whose name starts with `test`, click the checkbox and enter `startswith(displayName,'test')`.
+
+
## Deprecated Azure AD Graph API
@@ -328,4 +363,3 @@ Token Endpoint | https://login.partner.microsoftonline.cn/{tenantID}/oauth2/v2
>- If you don't wish to upgrade to v2.7.0+ after the Azure AD Graph API is retired, you'll need to either:
- Use the built-in Rancher auth or
- Use another third-party auth system and set that up in Rancher. Please see the [authentication docs](authentication-config.md) to learn how to configure other open authentication providers.
-
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-generic-oidc.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-generic-oidc.md
new file mode 100644
index 00000000000..e0d2577e5ff
--- /dev/null
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-generic-oidc.md
@@ -0,0 +1,110 @@
+---
+title: Configure Generic OIDC
+description: Create an OpenID Connect (OIDC) client and configure Rancher to work with your authentication provider. Your users can then sign into Rancher using their login from the authentication provider.
+---
+
+
+
+
+
+If your organization uses an OIDC provider for user authentication, you can configure Rancher to allow login using Identity Provider (IdP) credentials. Rancher supports integration with the OpenID Connect (OIDC) protocol and the SAML protocol. Both implementations are functionally equivalent when used with Rancher. The following instructions describe how to configure Rancher to work using the OIDC protocol.
+
+## Prerequisites
+
+- In Rancher:
+ - Generic OIDC is disabled.
+
+:::note
+Consult the documentation for your specific IdP to complete the listed prerequisites.
+:::
+
+- In your IdP:
+ - Create a new client with the settings below:
+
+ Setting | Value
+ ------------|------------
+ `Client ID` | (e.g. `rancher`)
+ `Name` | (e.g. `rancher`)
+ `Client Protocol` | `openid-connect`
+ `Access Type` | `confidential`
+ `Valid Redirect URI` | `https://yourRancherHostURL/verify-auth`
+
+ - In the new OIDC client, create mappers to expose the user's fields.
+ - Create a new Groups Mapper with the settings below:
+
+ Setting | Value
+ ------------|------------
+ `Name` | `Groups Mapper`
+ `Mapper Type` | `Group Membership`
+ `Token Claim Name` | `groups`
+ `Add to ID token` | `OFF`
+ `Add to access token` | `OFF`
+ `Add to user info` | `ON`
+
+ - Create a new Client Audience with the settings below:
+
+ Setting | Value
+ ------------|------------
+ `Name` | `Client Audience`
+ `Mapper Type` | `Audience`
+ `Included Client Audience` |
+ `Add to access token` | `ON`
+
+ - Create a new "Groups Path" with the settings below.
+
+ Setting | Value
+ ------------|------------
+ `Name` | `Group Path`
+ `Mapper Type` | `Group Membership`
+ `Token Claim Name` | `full_group_path`
+ `Full group path` | `ON`
+ `Add to user info` | `ON`
+
+- Important: Rancher uses the value received in the "sub" claim to form the PrincipalID, which is the unique identifier in Rancher. Make sure that this value is unique and immutable.
+
+## Configuring Generic OIDC in Rancher
+
+1. In the upper left corner of the Rancher UI, click **☰ > Users & Authentication**.
+1. In the left navigation bar, click **Auth Provider**.
+1. Select **Generic OIDC**.
+1. Complete the **Configure an OIDC account** form. For help with filling the form, see the [configuration reference](#configuration-reference).
+1. Click **Enable**.
+
+ Rancher will redirect you to the IdP login page. Enter your IdP credentials to validate your Rancher OIDC configuration.
+
+ :::note
+
+ You may need to disable your popup blocker to see the IdP login page.
+
+ :::
+
+**Result:** Rancher is configured to work with your provider using the OIDC protocol. Your users can now sign into Rancher using their IdP logins.
+
+## Configuration Reference
+
+| Field | Description |
+| ------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------|
+| Client ID | The Client ID of your OIDC client. |
+| Client Secret | The generated Secret of your OIDC client. |
+| Private Key/Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. Required if HTTPS/SSL is enabled on your OIDC server. |
+| Endpoints | Choose whether to use the generated values for the Rancher URL, Issuer, and Auth Endpoint fields or to provide manual overrides if incorrect. |
+| Rancher URL | The URL for your Rancher Server. |
+| Issuer | The URL of your IdP. If your provider has discovery enabled, Rancher uses the Issuer URL to fetch all of the required URLs. |
+| Auth Endpoint | The URL where users are redirected to authenticate. |
+## Troubleshooting
+
+If you are experiencing issues while testing the connection to the OIDC server, first double-check the configuration options of your OIDC client. You can also inspect the Rancher logs to help pinpoint what's causing issues. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation.
+
+All Generic OIDC related log entries are prepended with either `[generic oidc]` or `[oidc]`.
+
+### You are not redirected to your authentication provider
+
+If you fill out the **Configure an OIDC account** form and click on **Enable**, and you are not redirected to your IdP, verify your OIDC client configuration.
+
+### The generated `Issuer` and `Auth Endpoint` are incorrect
+
+If the `Issuer` and `Auth Endpoint` are generated incorrectly, open the **Configure an OIDC account** form, change **Endpoints** to `Specify (advanced)` and override the `Issuer` value.
+
+### Error: "Invalid grant_type"
+
+In some cases, the "Invalid grant_type" error message may be misleading and is actually caused by setting the `Valid Redirect URI` incorrectly.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
index d53a871ad0b..1f601689bc1 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
@@ -51,7 +51,6 @@ You can integrate Okta with Rancher, so that authenticated users can access Ranc
:::
-
1. After you complete the **Configure Okta Account** form, click **Enable**.
Rancher redirects you to the IdP login page. Enter credentials that authenticate with Okta IdP to validate your Rancher Okta configuration.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md
index 206c5c66709..9271d232ff2 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md
@@ -30,6 +30,14 @@ Within Rancher, each person authenticates as a _user_, which is a login that gra
For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)](manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md).
+## User Retention
+
+In Rancher v2.8.5 and later, you can enable user retention. This feature automatically removes inactive users after a configurable period of time.
+
+The user retention feature is disabled by default.
+
+For more information, see [Enabling User Retention](../../advanced-user-guides/enable-user-retention.md).
+
## Pod Security Policies
_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message.
@@ -82,4 +90,4 @@ The following features are available under **Global Configuration**:
- **Global DNS Entries**
- **Global DNS Providers**
-As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md), [global DNS entries](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details.
+As these are legacy features, please see the Rancher v2.0—v2.4 docs on [catalogs](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md), [global DNS entries](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), and [global DNS providers](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider) for more details.
\ No newline at end of file
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md
index 68bf57e5d79..120c587b0a2 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md
@@ -23,7 +23,7 @@ This option replaces "Rancher" with the value you provide in most places. Files
### Support Links
-Use a url address to send new "File an Issue" reports instead of sending users to the Github issues page. Optionally show Rancher community support links.
+Use a URL to send new "File an Issue" reports instead of sending users to the GitHub issues page. Optionally show Rancher community support links.
### Logo
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
index 419b6cba216..b9847ee8d9f 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
@@ -54,8 +54,20 @@ Since the private registry cannot be configured after the cluster is created, yo
1. Select **☰ > Cluster Management**.
1. On the **Clusters** page, click **Create**.
1. Choose a cluster type.
-1. In the **Cluster Configuration** go to the **Registries** tab and select **Pull images for Rancher from a private registry**.
-1. Enter the registry hostname and credentials.
+1. In the **Cluster Configuration** go to the **Registries** tab.
+1. Check the box next to **Enable cluster scoped container registry for Rancher system container images**.
+1. Enter the registry hostname.
+1. Under **Authentication** select **Create a HTTP Basic Auth Secret** and fill in the credential fields.
1. Click **Create**.
**Result:** The new cluster pulls images from the private registry.
+
+### Working with Private Registry Credentials
+
+When working with private registries, it is important to ensure that any secrets created for these registries are properly backed up. When you add a private registry credential secret through the Rancher GUI and select **Create a HTTP Basic Auth Secret**, the secret is included in backup operations using Rancher Backups.
+
+However, if you create a credential secret outside of the Rancher GUI, such as by using kubectl or Terraform, you must add the `fleet.cattle.io/managed=true` label to indicate that the secret should be included in backups created by Rancher Backups.
+
+For example, if you have a custom private registry named "my-private-registry" and create a secret called "my-reg-creds" for it, apply the `fleet.cattle.io/managed=true` label to this secret. This ensures that your backup process captures the secret, providing easy restoration if needed.
+
+By following this guidance, you can ensure that all of your private registry credentials are backed up and easily accessible in the event of a restore or migration.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/jwt-authentication.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/jwt-authentication.md
new file mode 100644
index 00000000000..7bfe5a19bbe
--- /dev/null
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/jwt-authentication.md
@@ -0,0 +1,17 @@
+---
+title: JSON Web Token (JWT) Authentication
+---
+
+
+
+
+Many 3rd party integrations available for Kubernetes, such as GitLab and HashiCorp Vault, involve giving an external process access to the Kubernetes API using a native Kubernetes Service Account token for authentication.
+
+In Rancher v2.9.0 and later, service accounts on downstream clusters can now authenticate through a JSON web token (JWT) using the Rancher authentication proxy. In Rancher versions earlier than v2.9.0, only Rancher-issued tokens were supported.
+
+To enable this feature, follow these steps:
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. Click **Advanced** to open the dropdown menu.
+1. Select **JWT Authentication**.
+1. Click the checkbox for the cluster you want to enable JWT authentication for, and click **Enable**. Alternatively, you can click **⋮** > **Enable**.
diff --git a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
index 512a24f4b3d..22aab5d5cac 100644
--- a/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
+++ b/docs/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
@@ -238,3 +238,9 @@ When you revoke the cluster membership for a standard user that's explicitly ass
- Exercise any [individual project roles](#project-role-reference) they are assigned.
If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships.
+
+### External `RoleTemplate` Behavior
+
+In Rancher v2.9.0 and later, external `RoleTemplate` objects can only be created if the backing `ClusterRole` exists in the local cluster or the `ExternalRules` is set in your configuration.
+
+For context, the backing `ClusterRole` holds cluster rules and privileges, and shares the same `metadata.name` used in the `RoleTemplate` in your respective cluster referenced by the `ClusterRoleTemplateBinding/ProjectRoleTemplateBinding`. Additionally, note that `escalate` permissions on `RoleTemplates` are required to create external `RoleTemplates` with `ExternalRules`.
diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
index 676d09f94ed..1fe878cccfa 100644
--- a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
+++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
@@ -62,21 +62,6 @@ Install the [`rancher-backup chart`](https://github.com/rancher/backup-restore-o
### 2. Restore from backup using a Restore custom resource
-:::note Important:
-
-Kubernetes v1.22, available as an experimental feature of v2.6.3, does not support restoring from backup files containing CRDs with the apiVersion `apiextensions.k8s.io/v1beta1`. In v1.22, the default `resourceSet` in the rancher-backup app is updated to collect only CRDs that use `apiextensions.k8s.io/v1`. There are currently two ways to work around this issue:
-
-1. Update the default `resourceSet` to collect the CRDs with the apiVersion v1.
-1. Update the default `resourceSet` and the client to use the new APIs internally, with `apiextensions.k8s.io/v1` as the replacement.
-
- :::note
-
- When making or restoring backups for v1.22, the Rancher version and the local cluster's Kubernetes version should be the same. The Kubernetes version should be considered when restoring a backup since the supported apiVersion in the cluster and in the backup file could be different.
-
- :::
-
-:::
-
1. When using S3 object storage as the backup source for a restore that requires credentials, create a `Secret` object in this cluster to add the S3 credentials. The secret data must have two keys - `accessKey`, and `secretKey`, that contain the S3 credentials.
The secret can be created in any namespace, this example uses the default namespace.
diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
index c4d2ae77476..f41fa7e0ee7 100644
--- a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
+++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
@@ -79,15 +79,20 @@ If you are using [local snapshots](./back-up-rancher-launched-kubernetes-cluster
1. In the **Clusters** page, go to the cluster where you want to remove nodes.
1. In the **Machines** tab, click **⋮ > Delete** on each node you want to delete. Initially, you will see the nodes hang in a `deleting` state, but once all etcd nodes are deleting, they will be removed together. This is due to the fact that Rancher sees all etcd nodes deleting and proceeds to "short circuit" the etcd safe-removal logic.
-1. After all etcd nodes are removed, add a new etcd node that you are planning to restore from.
+1. After all etcd nodes are removed, add the new etcd node that you are planning to restore from. Assign the new node the role of `all` (etcd, controlplane, and worker).
- - For custom clusters, go to the **Registration** tab then copy and run the registration command on your node. If the node has previously been used in a cluster, [clean the node](../manage-clusters/clean-cluster-nodes.md#cleaning-up-nodes) first.
+ - If the node was previously in a cluster, [clean the node](../manage-clusters/clean-cluster-nodes.md#cleaning-up-nodes) first.
+ - For custom clusters, go to the **Registration** tab and check the box for `etcd, controlplane, and worker`. Then copy and run the registration command on your node.
- For node driver clusters, a new node is provisioned automatically.
At this point, Rancher will indicate that restoration from etcd snapshot is required.
1. Restore from an etcd snapshot.
+ :::note
+ As the etcd node is a clean node, you may need to manually create the `/var/lib/rancher/<distro>/server/db/snapshots/` path.
+ :::
+
- For S3 snapshots, restore using the UI.
1. Click the **Snapshots** tab to view the list of saved snapshots.
1. Go to the snapshot you want to restore and click **⋮ > Restore**.
@@ -95,7 +100,15 @@ If you are using [local snapshots](./back-up-rancher-launched-kubernetes-cluster
1. Click **Restore**.
- For local snapshots, restore using the UI is **not** available.
1. In the upper right corner, click **⋮ > Edit YAML**.
- 1. Define `spec.cluster.rkeConfig.etcdSnapshotRestore.name` as the filename of the snapshot on disk in `/var/lib/rancher//server/db/snapshots/`.
+ 1. The example YAML below can be added under your `rkeConfig` to configure the etcd restore:
+
+ ```yaml
+ ...
+ rkeConfig:
+ etcdSnapshotRestore:
+ name: # This field is required. Refers to the filename of the associated etcdsnapshot object.
+ ...
+ ```
1. After restoration is successful, you can scale your etcd nodes back up to the desired redundancy.
diff --git a/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md
index b5c95f85af0..84ae382badb 100644
--- a/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md
+++ b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md
@@ -58,7 +58,7 @@ To display prerelease versions:
| rancher-logging | 100.0.0+up3.12.0 | 100.1.2+up3.17.4 |
| rancher-longhorn | 100.0.0+up1.1.2 | 100.1.2+up1.2.4 |
| rancher-monitoring | 100.0.0+up16.6.0 | 100.1.2+up19.0.3 |
-| rancher-sriov (experimental) | 100.0.0+up0.1.0 | 100.0.3+up0.1.0 |
+| rancher-sriov[1](#sriov-chart-deprecation-and-migration) | 100.0.0+up0.1.0 | 100.0.3+up0.1.0 |
| rancher-vsphere-cpi | 100.3.0+up1.2.1 | 100.3.0+up1.2.1 |
| rancher-vsphere-csi | 100.3.0+up2.5.1-rancher1 | 100.3.0+up2.5.1-rancher1 |
| rancher-wins-upgrader | 0.0.100 | 100.0.1+up0.0.1 |
@@ -163,10 +163,37 @@ spec:
:::
+### Add Custom OCI Chart Repositories
+
+:::caution
+
+This feature is currently experimental and is not officially supported in Rancher.
+
+:::
+
+Helm v3 introduced storing Helm charts as [Open Container Initiative (OCI)](https://opencontainers.org/about/overview/) artifacts in container registries. With Rancher v2.9.0, you can add [OCI-based Helm chart repositories](https://helm.sh/docs/topics/registries/) alongside HTTP-based and Git-based repositories. This means you can deploy apps that are stored as OCI artifacts. For more information, see [Using OCI Helm Chart Repositories](./oci-repositories.md).
+
### Helm Compatibility
Only Helm 3 compatible charts are supported.
+### Refresh Chart Repositories
+
+The **Refresh** button can be used to sync changes from selected Helm chart repositories on the **Repositories** page.
+
+To refresh a chart repository:
+
+1. Click **☰ > Cluster Management**.
+1. Find the name of the cluster whose repositories you want to access. Click **Explore** at the end of the cluster's row.
+1. In the left navigation menu on the **Cluster Dashboard**, click **Apps > Repositories**.
+1. Use the toggle next to the **State** field to select all repositories, or toggle specified chart repositories to sync changes.
+1. Click **Refresh**.
+1. The **⋮** at the end of each chart repository row also includes a **Refresh** option, which can be clicked to refresh the respective repository.
+
+Upon refresh, non-Airgap Rancher installations reflect any chart repository changes immediately, and the **State** field for updated repositories moves from `In Progress` to `Active` once the action is completed.
+
+Airgap installations where Rancher is configured to use the packaged copy of Helm system charts ([`useBundledSystemChart=true`](../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md#helm-chart-options-for-air-gap-installations)) will only refer to the [system-chart](https://github.com/rancher/system-charts) repository that comes bundled and will not be able to be refreshed or synced.
+
## Deploy and Upgrade Charts
To install and deploy a chart:
@@ -212,6 +239,31 @@ To upgrade legacy multi-cluster apps:
1. Click **☰**.
1. Under **Legacy Apps**, click **Multi-cluster Apps**.
+### Chart-Specific Information
+
+#### sriov Chart Deprecation and Migration
+
+The `sriov` (SR-IOV network operator) chart from the Rancher Charts repository is deprecated and will be removed in Rancher v2.10. Please migrate to the `sriov-network-operator` chart from the SUSE Edge repository (https://github.com/suse-edge/charts) instead.
+
+To migrate, follow these steps:
+
+1. Add the SUSE Edge repository to your cluster by following the steps in [Add Custom Git Repositories](#add-custom-git-repositories).
+1. For the **Git Repo URL** field, enter `https://github.com/suse-edge/charts`.
+1. Click **Create**.
+1. In the left navigation menu on the **Cluster Dashboard**, click **Apps > Charts**.
+1. Find the `sriov-network-operator` chart and click on it.
+1. Click **Install**.
+1. In the **Name** field, enter the same name you used for your existing `sriov` chart installation.
+1. Click **Next**.
+1. Click **Install**.
+
+**Result:** Rancher redirects to the **Installed Apps** page where your existing installation enters the **Updating** state. The migration is complete when it enters the **Deployed** state.
+
## Limitations
-Dashboard apps or Rancher feature charts can't be installed using the Rancher CLI.
+- Dashboard apps or Rancher feature charts can't be installed using the Rancher CLI.
+
+- When determining the most recent version to display for the **Upgradable** column on the **Apps > Installed Apps** page, rather than only considering versions of the Helm chart from the repository it was installed from, Rancher considers versions of the Helm chart from all repositories on the cluster.
+
+ For example, suppose you install `cert-manager` v1.13.0 from repository A, where v1.14.0 is now the most recent version available. In this case, you expect **Upgradable** to display v1.14.0. However, if the cluster also has access to repository B where v1.15.0 of `cert-manager` is available, then **Upgradable** displays v1.15.0 even though the original installation used repository A.
+
\ No newline at end of file
diff --git a/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/oci-repositories.md b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/oci-repositories.md
new file mode 100644
index 00000000000..4626e592b8d
--- /dev/null
+++ b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/oci-repositories.md
@@ -0,0 +1,115 @@
+---
+title: Using OCI-Based Helm Chart Repositories
+---
+
+
+
+
+
+:::caution
+
+This feature is currently experimental and is not officially supported in Rancher.
+
+:::
+
+Helm v3 introduced storing Helm charts as [Open Container Initiative (OCI)](https://opencontainers.org/about/overview/) artifacts in container registries. With Rancher v2.9.0, you can add [OCI-based Helm chart repositories](https://helm.sh/docs/topics/registries/) alongside HTTP-based and Git-based repositories. This means that you can deploy apps that are stored as OCI artifacts.
+
+## Add an OCI-Based Helm Chart Repository
+
+To add an OCI-based Helm chart repository through the Rancher UI:
+
+1. Click **☰ > Cluster Management**.
+2. Find the name of the cluster whose repositories you want to access. Click **Explore** at the end of the cluster's row.
+3. In the left navigation bar, select **Apps > Repositories**.
+4. Click **Create**.
+5. Enter a **Name** for the registry. Select **OCI Repository** as the target.
+6. Enter the **OCI Repository Host URL** for the registry. The registry endpoint must not contain anything besides OCI Helm Chart artifacts. The artifacts should all have unique names. If you attempt to add an endpoint that contains any other kinds of files or artifacts, the OCI repository will not be added.
+
+ :::note
+
+ You can use the **OCI URL** field to fine-tune how many charts from the registry are available for installation on Rancher. More generic endpoints target more charts, as the following examples demonstrate:
+
+ - `oci://<registry-host>`: Every chart in the registry becomes available for installation, regardless of namespace or tag.
+ - `oci://<registry-host>/<namespace>`: Every chart in the specified namespace within the registry becomes available for installation.
+ - `oci://<registry-host>/<namespace>/<chart-name>`: Only the specified chart and any associated tags or versions of that chart become available for installation.
+ - `oci://<registry-host>/<namespace>/<chart-name>:<tag>`: Only the chart with the specified tag becomes available for installation.
+
+ :::
+
+7. Set up authentication. Select **Basicauth** from the authentication field and enter a username and password as required. Otherwise, create or select an **Authentication** secret. See [Authentication](#authentication-for-oci-based-helm-chart-repositories) for a full description.
+8. (optional) Enter a base64 encoded DER certificate in the **CA Cert Bundle** field. This field is for cases where you have a private OCI-based Helm chart repository and need Rancher to trust its certificates.
+9. (optional) To allow insecure connections without performing an SSL check, select **Skip TLS Verification**. To force Rancher to use HTTP instead of HTTPS to send requests to the repository, select **Insecure Plain Http**.
+10. (optional) If your repository has a rate limiting policy and may respond with status code `429 Too Many Requests`, you may want to fill out the fields under **Exponential Back Off**:
+ - **Min Wait**: The minimum duration in seconds that Rancher should wait before retrying. The default is 1 second.
+ - **Max Wait**: The maximum duration in seconds that Rancher should wait before retrying. The default is 5 seconds.
+ - **Max Number of Retries**: The default is 5 retries.
+
+ Once these values are set, Rancher responds to the `429` status code by staggering requests based on the minimum and maximum wait values. The wait time between retries increases exponentially, until Rancher has sent the maximum number of retries set. See [Rate Limiting](#rate-limiting-of-oci-based-helm-chart-repositories) for more details.
+11. Add any labels and annotations.
+12. Click **Create**.
+
+It may take some time for the OCI repository to activate. This is particularly true if the OCI endpoint contains multiple namespaces.
+
+## Authentication for OCI-Based Helm Chart Repositories
+
+Rancher supports BasicAuth for OCI registries. You must create a [**BasicAuth** Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret). You can also [create the secret through the Rancher UI](../kubernetes-resources-setup/secrets.md).
+
+
+The CRD that is linked to the OCI-based Helm repository is `ClusterRepo`.
+
+## View Helm Charts in OCI-Based Helm Chart Repositories
+
+To view Helm charts in the OCI-based Helm chart repository after it achieves an `Active` state:
+
+1. Click **☰**. Under **Explore Cluster** in the left navigation menu, select a cluster.
+1. Click **Apps > Charts**.
+1. Select the OCI-based Helm chart repository from the dropdown.
+
+## Refresh an OCI-Based Helm Chart Repository
+
+Rancher automatically refreshes the OCI-based Helm chart repository every 6 hours.
+
+If you need to update immediately, you can [perform a manual refresh](./helm-charts-in-rancher.md#refresh-chart-repositories).
+
+## Update an OCI-Based Helm Chart Repository Configuration
+
+1. Click **☰ > Cluster Management**.
+1. Find the name of the cluster whose repositories you want to access. Click **Explore** at the end of the cluster's row.
+1. In the left navigation bar, select **Apps > Repositories**.
+1. Find the row associated with the OCI-based Helm chart repository, and click **⋮**.
+1. From the submenu, select **Edit Config**.
+
+## Delete an OCI-Based Helm Chart Repository
+
+1. Click **☰ > Cluster Management**.
+1. Find the name of the cluster whose repositories you want to access. Click **Explore** at the end of the cluster's row.
+1. In the left navigation bar, select **Apps > Repositories**.
+1. Select the row associated with the OCI-based Helm chart repository, and click **Delete**.
+
+## Size Limitations of OCI-Based Helm Chart Repositories in Rancher
+
+Due to security concerns, there are limitations on how large a Helm chart you can deploy through an OCI-based repository, and on how much metadata you can use to describe the Helm charts within a single OCI endpoint.
+
+Rancher can deploy OCI Helm charts up to 20 MB in size.
+
+## Rate Limiting of OCI-Based Helm Chart Repositories
+
+Different OCI registries implement rate limiting in different ways.
+
+Most servers return a `Retry-After` header, indicating how long to wait before rate limiting is lifted.
+
+Docker Hub returns a `429` status code when it completes all allocated requests. It also returns a `RateLimit-Remaining` header which describes the rate limiting policy.
+
+Rancher currently checks for the `Retry-After` header. It also handles Docker Hub-style responses (status code `429` and the `RateLimit-Remaining` header) and automatically waits before making a new request. When handling `Retry-After` or Docker Hub-style responses, Rancher ignores `ExponentialBackOff` values.
+
+If you have an OCI-based Helm chart repository which doesn't implement the `Retry-After` or `RateLimit-Remaining` headers, and think you may be rate-limited at some point, fill out the fields under **Exponential Back Off** when you add the repository.
+
+For example, if you have an OCI-based Helm chart repository that doesn't return a `Retry-After` header, but you know that the server allows 50 requests in 24 hours, you can provide Rancher a **Min Wait** value of **86400** seconds, a **Max Wait** value of **90000** seconds, and a **Max Number of Retries** value of **1**. Then, if Rancher gets rate limited by the server, Rancher will wait for 24 hours before trying again. The request should succeed as Rancher hasn't sent any other requests in the previous 24 hours.
+
+## Troubleshooting OCI-based Helm Registries
+
+- To enhance logging information, [enable the debug option](../../../troubleshooting/other-troubleshooting-tips/logging.md#kubernetes-install) while deploying Rancher.
+
+- If there is any discrepancy between the repository contents and Rancher, you should refresh the cluster repository as a first resort. If the discrepancy persists, delete the OCI-based Helm chart repository from Rancher and add it again. Deleting the repository won't delete any Helm charts that are already installed.
+
+- Apps installed through OCI-based Helm chart repositories are subject to a known issue with how Rancher displays upgradeable version information. See the [Limitations](./helm-charts-in-rancher.md#limitations) section of **Helm Charts and Apps** for more details.
diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
index ddc85f764a8..7442579f3bb 100644
--- a/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
+++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
@@ -19,7 +19,7 @@ These nodes must be in the same region. You may place these servers in separate
To install the Rancher management server on a high-availability RKE2 cluster, we recommend setting up the following infrastructure:
- **Three Linux nodes,** typically virtual machines, in the infrastructure provider of your choice.
-- **A load balancer** to direct traffic to the two nodes.
+- **A load balancer** to direct traffic to the nodes.
- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it.
### 1. Set up Linux Nodes
@@ -51,7 +51,7 @@ Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance
:::
-### 4. Set up the DNS Record
+### 3. Set up the DNS Record
Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer.
@@ -59,4 +59,4 @@ Depending on your environment, this may be an A record pointing to the load bala
You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one.
-For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)
\ No newline at end of file
+For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
index 14ae384e387..374e402d4b4 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
@@ -49,5 +49,5 @@ number of nodes for each Kubernetes role, refer to the section on [recommended a
### Networking
-* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks).
+* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://etcd.io/docs/v3.5/tuning/) allow etcd to run in most networks (except really high latency networks).
* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider](../set-up-cloud-providers/set-up-cloud-providers.md) resources, consult the documentation for any restrictions (i.e. zone storage restrictions).
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
index c709d847ae3..8d4f7932f4c 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
@@ -57,7 +57,7 @@ The number of nodes that you can lose at once while maintaining cluster availabi
References:
-* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance)
+* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.5/faq/#what-is-failure-tolerance)
* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/)
### Number of Worker Nodes
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-azure.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-azure.md
new file mode 100644
index 00000000000..9f77591a582
--- /dev/null
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-azure.md
@@ -0,0 +1,211 @@
+---
+title: Migrating Azure In-tree to Out-of-tree
+---
+
+
+
+
+
+Kubernetes is moving away from maintaining cloud providers in-tree.
+
+Starting with Kubernetes 1.29, in-tree cloud providers have been disabled. You must disable `DisableCloudProviders` and `DisableKubeletCloudCredentialProvider` to use the in-tree Azure cloud provider or migrate from in-tree cloud provider to out-of-tree provider. You can disable the required feature gates by setting `feature-gates=DisableCloudProviders=false` as an additional argument for the cluster's Kubelet, Controller Manager, and API Server in the advanced cluster configuration. Additionally, set `DisableKubeletCloudCredentialProvider=false` in the Kubelet's arguments to enable in-tree functionality for authenticating to Azure container registries for image pull credentials. See [upstream docs](https://github.com/kubernetes/kubernetes/pull/117503) for more details.
+
+In Kubernetes v1.30 and later, the in-tree cloud providers have been removed. Rancher allows you to upgrade to Kubernetes v1.30 when you migrate from an in-tree to out-of-tree provider.
+
+To migrate from the in-tree cloud provider to the out-of-tree Azure cloud provider, you must stop the existing cluster's kube controller manager and install the Azure cloud controller manager.
+
+If it's acceptable to have some downtime during migration, follow the instructions to [set up an external cloud provider](../set-up-cloud-providers/azure.md#using-the-out-of-tree-azure-cloud-provider). These instructions outline how to configure the out-of-tree cloud provider for a newly provisioned cluster. During set up, there will be some downtime, as there is a time gap between when the old cloud provider stops running and when the new cloud provider starts to run.
+
+If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager.
+
+:::note Important:
+The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration).
+
+Starting with Kubernetes v1.26, in-tree persistent volume types `kubernetes.io/azure-disk` and `kubernetes.io/azure-file` are deprecated and no longer supported. There are no plans to remove these drivers following their deprecation, however you should migrate to the corresponding CSI drivers, `disk.csi.azure.com` and `file.csi.azure.com`. To review the migration options for your storage classes and upgrade your cluster to use Azure Disks and Azure Files CSI drivers, see [Migrate from in-tree to CSI drivers](https://learn.microsoft.com/en-us/azure/aks/csi-migrate-in-tree-volumes).
+:::
+
+
+
+
+1. Update the cluster config to enable leader migration:
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ kube-controller-manager-arg:
+ - enable-leader-migration
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/control-plane-role
+ operator: In
+ values:
+ - 'true'
+```
+
+Note that the cloud provider is still `azure` at this step:
+
+```yaml
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ cloud-provider-name: azure
+```
+
+2. Cordon control plane nodes so that Azure cloud controller pods run on nodes only after upgrading to the external cloud provider:
+
+```shell
+kubectl cordon -l "node-role.kubernetes.io/control-plane=true"
+```
+
+3. To deploy the Azure cloud controller manager, use any of the available options:
+- UI: Follow steps 1-10 of [Helm chart installation from UI](../set-up-cloud-providers/azure.md#helm-chart-installation-from-ui) to install the cloud controller manager chart.
+- CLI: Follow steps 1-4 of [Helm chart installation from CLI](../set-up-cloud-providers/azure.md#helm-chart-installation-from-cli).
+- Update the cluster's additional manifest: Follow steps 2-3 to [install the cloud controller manager chart](../set-up-cloud-providers/azure.md#using-the-out-of-tree-azure-cloud-provider).
+
+Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes.
+
+4. To enable leader migration, add `--enable-leader-migration` to the container arguments of `cloud-controller-manager`:
+
+```shell
+kubectl -n kube-system patch deployment cloud-controller-manager \
+--type=json \
+-p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--enable-leader-migration"}]'
+```
+
+5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller manager.
+ If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file.
+
+```yaml
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ cloud-provider-name: external
+```
+
+Remove `enable-leader-migration` from the kube controller manager:
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ kube-controller-manager-arg:
+ - enable-leader-migration
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/control-plane-role
+ operator: In
+ values:
+ - 'true'
+```
+
+6. Uncordon control plane nodes so that Azure cloud controller pods now run on nodes:
+
+```shell
+kubectl uncordon -l "node-role.kubernetes.io/control-plane=true"
+```
+
+7. Update the cluster. The `cloud-controller-manager` pods should now be running.
+
+```shell
+kubectl rollout status deployment -n kube-system cloud-controller-manager
+kubectl rollout status daemonset -n kube-system cloud-node-manager
+```
+
+8. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID:
+
+```shell
+kubectl describe nodes | grep "ProviderID"
+```
+
+9. (Optional) You can also disable leader migration after the upgrade, as leader migration is not required with only one cloud-controller-manager.
+ Update the `cloud-controller-manager` deployment to remove leader migration from the container arguments:
+
+```yaml
+- --enable-leader-migration=true
+```
+
+
+
+
+
+1. Update the cluster config to enable leader migration in `cluster.yml`:
+
+```yaml
+services:
+ kube-controller:
+ extra_args:
+ enable-leader-migration: "true"
+```
+
+Note that the cloud provider is still `azure` at this step:
+
+```yaml
+cloud_provider:
+ name: azure
+```
+
+2. Cordon the control plane nodes, so that Azure cloud controller pods run on nodes only after upgrading to the external cloud provider:
+
+```shell
+kubectl cordon -l "node-role.kubernetes.io/controlplane=true"
+```
+
+3. To install the Azure cloud controller manager, follow the same steps as when installing Azure cloud provider on a new cluster:
+- UI: Follow steps 1-10 of [Helm chart installation from UI](../set-up-cloud-providers/azure.md#helm-chart-installation-from-ui) to install the cloud controller manager chart.
+- CLI: Follow steps 1-4 of [Helm chart installation from CLI](../set-up-cloud-providers/azure.md#helm-chart-installation-from-cli) to install the cloud controller manager chart.
+
+4. Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will upgrade and uncordon each node, and schedule `cloud-controller-manager` pods.
+
+5. To enable leader migration, add `--enable-leader-migration` to the container arguments of `cloud-controller-manager`:
+
+```shell
+kubectl -n kube-system patch deployment cloud-controller-manager \
+--type=json \
+-p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--enable-leader-migration"}]'
+```
+
+6. Update `cluster.yml` to change the cloud provider to `external` and remove the leader migration arguments from the kube-controller.
+
+```yaml
+rancher_kubernetes_engine_config:
+ cloud_provider:
+ name: external
+```
+
+Remove `enable-leader-migration` if you don't want it enabled in your cluster:
+
+```yaml
+services:
+ kube-controller:
+ extra_args:
+ enable-leader-migration: "true"
+```
+
+7. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well.
+
+8. Update the cluster. The `cloud-controller-manager` pods should now be running.
+
+```shell
+kubectl rollout status deployment -n kube-system cloud-controller-manager
+kubectl rollout status daemonset -n kube-system cloud-node-manager
+```
+
+9. The cloud provider is responsible for setting the ProviderID of the node. Verify that all nodes are initialized with the ProviderID:
+
+```shell
+kubectl describe nodes | grep "ProviderID"
+```
+
+10. (Optional) You can also disable leader migration after the upgrade, as leader migration is not required with only one cloud-controller-manager.
+ Update the `cloud-controller-manager` deployment to remove leader migration from the container arguments:
+
+```yaml
+- --enable-leader-migration=true
+```
+
+
+
+
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
index 7cd0bb6f3f9..afc0f04adce 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
@@ -108,7 +108,7 @@ Regarding CPU and memory, it is recommended that the different planes of Kuberne
For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
-For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/)
+For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.5/op-guide/hardware/)
## Networking Requirements
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
index a62ed8dc14d..df17bb26d22 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
@@ -184,9 +184,7 @@ To prevent issues when upgrading, the [Kubernetes upgrade best practices](https:
## Authorized Cluster Endpoint Support for RKE2 and K3s Clusters
-_Available as of v2.6.3_
-
-Authorized Cluster Endpoint (ACE) support has been added for registered RKE2 and K3s clusters. This support includes manual steps you will perform on the downstream cluster to enable the ACE. For additional information on the authorized cluster endpoint, click [here](../manage-clusters/access-clusters/authorized-cluster-endpoint.md).
+Rancher supports Authorized Cluster Endpoints (ACE) for registered RKE2 and K3s clusters. This support includes manual steps you will perform on the downstream cluster to enable the ACE. For additional information on the authorized cluster endpoint, click [here](../manage-clusters/access-clusters/authorized-cluster-endpoint.md).
:::note Notes:
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
index 00857004174..b49ca3f3ca3 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
@@ -332,7 +332,7 @@ Refer to the offical AWS upstream documentation for the [cloud controller manage
-Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github.
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on GitHub.
1. Add the Helm repository:
@@ -465,7 +465,7 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
-Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github.
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on GitHub.
1. Add the Helm repository:
@@ -737,7 +737,7 @@ nodeSelector:
10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully:
```shell
-kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+kubectl rollout status deployment -n kube-system aws-cloud-controller-manager
```
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/azure.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/azure.md
index 8720aa0760e..c291376354a 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/azure.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/azure.md
@@ -6,6 +6,17 @@ title: Setting up the Azure Cloud Provider
+:::note Important:
+
+In Kubernetes 1.30 and later, you must use an out-of-tree Azure cloud provider. The Azure cloud provider has been [removed completely](https://github.com/kubernetes/kubernetes/pull/122857), and won't work after an upgrade to Kubernetes 1.30. The steps listed below are still required to set up an Azure cloud provider. You can [set up an out-of-tree cloud provider](#using-the-out-of-tree-azure-cloud-provider) after completing the prerequisites for Azure.
+
+You can also [migrate from an in-tree to an out-of-tree Azure cloud provider](../migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-azure.md) on Kubernetes 1.29 and earlier. All existing clusters must migrate prior to upgrading to v1.30 in order to stay functional.
+
+Starting with Kubernetes 1.29, in-tree cloud providers have been disabled. You must disable `DisableCloudProviders` and `DisableKubeletCloudCredentialProvider` to use the in-tree Azure cloud provider. You can do this by setting `feature-gates=DisableCloudProviders=false` as an additional argument for the cluster's Kubelet, Controller Manager, and API Server in the advanced cluster configuration. Additionally, set `DisableKubeletCloudCredentialProvider=false` in the Kubelet's arguments to enable in-tree functionality for authenticating to Azure container registries for image pull credentials. See [upstream docs](https://github.com/kubernetes/kubernetes/pull/117503) for more details.
+
+Starting with Kubernetes version 1.26, in-tree persistent volume types `kubernetes.io/azure-disk` and `kubernetes.io/azure-file` are deprecated and will no longer be supported. For new clusters, [install the CSI drivers](#installing-csi-drivers), or migrate to the corresponding CSI drivers `disk.csi.azure.com` and `file.csi.azure.com` by following the [upstream migration documentation](https://learn.microsoft.com/en-us/azure/aks/csi-migrate-in-tree-volumes).
+:::
+
When using the `Azure` cloud provider, you can leverage the following capabilities:
- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group.
@@ -76,12 +87,15 @@ Only hosts expected to be load balancer back ends need to be in this group.
## RKE2 Cluster Set-up in Rancher
+:::note Important:
+This section is valid only for creating clusters with the in-tree cloud provider.
+:::
+
1. Choose "Azure" from the Cloud Provider drop-down in the Cluster Configuration section.
-1. * Supply the Cloud Provider Configuration. Note that Rancher will automatically create a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you will need to specify them before creating the cluster.
- * You can click on "Show Advanced" to see more of these automatically generated names and update them if
- necessary. Your Cloud Provider Configuration **must** match the fields in the Machine Pools section. If you have multiple pools, they must all use the same Resource Group, Availability Set, Subnet, Virtual Network, and Network Security Group.
- * An example is provided below. You will modify it as needed.
+2. Supply the Cloud Provider Configuration. Note that Rancher automatically creates a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you must specify them before creating the cluster.
+ * Click **Show Advanced** to view or edit these automatically generated names. Your Cloud Provider Configuration **must** match the fields in the **Machine Pools** section. If you have multiple pools, they must all use the same Resource Group, Availability Set, Subnet, Virtual Network, and Network Security Group.
+ * An example is provided below. Modify it as needed.
Example Cloud Provider Config
@@ -110,6 +124,492 @@ Only hosts expected to be load balancer back ends need to be in this group.
-1. Under the **Cluster Configuration > Advanced** section, click **Add** under **Additional Controller Manager Args** and add this flag: `--configure-cloud-routes=false`
+3. Under the **Cluster Configuration > Advanced** section, click **Add** under **Additional Controller Manager Args** and add this flag: `--configure-cloud-routes=false`
-1. Click the **Create** button to submit the form and create the cluster.
+4. Click **Create** to submit the form and create the cluster.
+
+## Cloud Provider Configuration
+
+Rancher automatically creates a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you will need to specify them before creating the cluster. You can check **RKE1 Node Templates** or **RKE2 Machine Pools** to view or edit these automatically generated names.
+
+**Refer to the full list of configuration options in the [upstream docs](https://cloud-provider-azure.sigs.k8s.io/install/configs/).**
+
+:::note
+1. `useInstanceMetadata` must be set to `true` for the cloud provider to correctly configure `providerID`.
+2. `excludeMasterFromStandardLB` must be set to `false` if you need to add nodes labeled `node-role.kubernetes.io/master` to the backend of the Azure Load Balancer (ALB).
+3. `loadBalancerSku` can be set to `basic` or `standard`. Basic SKU will be deprecated in September 2025. Refer to the [Azure upstream docs](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/public-ip-basic-upgrade-guidance#basic-sku-vs-standard-sku) for more information.
+:::
+
+Azure supports reading the cloud config from Kubernetes secrets. The secret is a serialized version of the azure.json file. When the secret is changed, the cloud controller manager reconstructs itself without restarting the pod. It is recommended for the Helm chart to read the Cloud Provider Config from the secret.
+
+Note that the chart reads the Cloud Provider Config from a given secret name in the `kube-system` namespace. Since Azure reads Kubernetes secrets, RBAC also needs to be configured. An example secret for the Cloud Provider Config is shown below. Modify it as needed and create the secret.
+
+ ```yaml
+# azure-cloud-config.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: azure-cloud-config
+ namespace: kube-system
+type: Opaque
+stringData:
+ cloud-config: |-
+ {
+ "cloud": "AzurePublicCloud",
+ "tenantId": "",
+ "subscriptionId": "",
+ "aadClientId": "",
+ "aadClientSecret": "",
+ "resourceGroup": "docker-machine",
+ "location": "westus",
+ "subnetName": "docker-machine",
+ "securityGroupName": "rancher-managed-kqmtsjgJ",
+ "securityGroupResourceGroup": "docker-machine",
+ "vnetName": "docker-machine-vnet",
+ "vnetResourceGroup": "docker-machine",
+ "primaryAvailabilitySetName": "docker-machine",
+ "routeTableResourceGroup": "docker-machine",
+ "cloudProviderBackoff": false,
+ "useManagedIdentityExtension": false,
+ "useInstanceMetadata": true,
+ "loadBalancerSku": "standard",
+      "excludeMasterFromStandardLB": false
+ }
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ kubernetes.io/cluster-service: "true"
+ name: system:azure-cloud-provider-secret-getter
+rules:
+ - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: ["azure-cloud-config"]
+    verbs:
+      - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ kubernetes.io/cluster-service: "true"
+ name: system:azure-cloud-provider-secret-getter
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:azure-cloud-provider-secret-getter
+subjects:
+  - kind: ServiceAccount
+    name: azure-cloud-config
+    namespace: kube-system
+ ```
+
+## Using the Out-of-tree Azure Cloud Provider
+
+
+
+
+1. Select **External** from the **Cloud Provider** drop-down in the **Cluster Configuration** section.
+
+2. Prepare the Cloud Provider Configuration to set it in the next step. Note that Rancher automatically creates a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you must specify them before creating the cluster.
+ - Click **Show Advanced** to view or edit these automatically generated names. Your Cloud Provider Configuration **must** match the fields in the **Machine Pools** section. If you have multiple pools, they must all use the same Resource Group, Availability Set, Subnet, Virtual Network, and Network Security Group.
+
+3. Under **Cluster Configuration > Advanced**, click **Add** under **Additional Controller Manager Args** and add this flag: `--configure-cloud-routes=false`.
+
+Note that the chart reads the Cloud Provider Config from the secret in the `kube-system` namespace. An example secret for the Cloud Provider Config is shown below. Modify it as needed. Refer to the full list of configuration options in the [upstream docs](https://cloud-provider-azure.sigs.k8s.io/install/configs/).
+
+ ```yaml
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+ name: azure-cloud-controller-manager
+ namespace: kube-system
+spec:
+ chart: cloud-provider-azure
+ repo: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
+ targetNamespace: kube-system
+ bootstrap: true
+ valuesContent: |-
+ infra:
+ clusterName:
+ cloudControllerManager:
+ cloudConfigSecretName: azure-cloud-config
+ cloudConfig: null
+ clusterCIDR: null
+ enableDynamicReloading: 'true'
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+ allocateNodeCidrs: 'false'
+ hostNetworking: true
+ caCertDir: /etc/ssl
+ configureCloudRoutes: 'false'
+ enabled: true
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ value: 'true'
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: azure-cloud-config
+ namespace: kube-system
+type: Opaque
+stringData:
+ cloud-config: |-
+ {
+ "cloud": "AzurePublicCloud",
+ "tenantId": "",
+ "subscriptionId": "",
+ "aadClientId": "",
+ "aadClientSecret": "",
+ "resourceGroup": "docker-machine",
+ "location": "westus",
+ "subnetName": "docker-machine",
+ "securityGroupName": "rancher-managed-kqmtsjgJ",
+ "securityGroupResourceGroup": "docker-machine",
+ "vnetName": "docker-machine-vnet",
+ "vnetResourceGroup": "docker-machine",
+ "primaryAvailabilitySetName": "docker-machine",
+ "routeTableResourceGroup": "docker-machine",
+ "cloudProviderBackoff": false,
+ "useManagedIdentityExtension": false,
+ "useInstanceMetadata": true,
+ "loadBalancerSku": "standard",
+      "excludeMasterFromStandardLB": false
+ }
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ kubernetes.io/cluster-service: "true"
+ name: system:azure-cloud-provider-secret-getter
+rules:
+ - apiGroups: [""]
+resources: ["secrets"]
+resourceNames: ["azure-cloud-config"]
+verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ kubernetes.io/cluster-service: "true"
+ name: system:azure-cloud-provider-secret-getter
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:azure-cloud-provider-secret-getter
+subjects:
+  - kind: ServiceAccount
+    name: azure-cloud-config
+    namespace: kube-system
+ ```
+
+4. Click **Create** to submit the form and create the cluster.
+
+
+
+
+
+1. Choose **External** from the **Cloud Provider** drop-down in the **Cluster Options** section. This sets `--cloud-provider=external` for Kubernetes components.
+
+2. Install the `cloud-provider-azure` chart after the cluster finishes provisioning. Note that the cluster is not successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done [manually using CLI](#helm-chart-installation-from-cli), or via [Helm charts in UI](#helm-chart-installation-from-ui).
+
+Refer to the [official Azure upstream documentation](https://cloud-provider-azure.sigs.k8s.io/install/azure-ccm/) for more details on deploying the Cloud Controller Manager.
+
+
+
+
+### Helm Chart Installation from CLI
+
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes-sigs/cloud-provider-azure/tree/master/helm/cloud-provider-azure) can be found on Github.
+
+1. Create a `azure-cloud-config` secret with the required [cloud provider config](#cloud-provider-configuration).
+
+```shell
+kubectl apply -f azure-cloud-config.yaml
+```
+
+2. Add the Helm repository:
+
+```shell
+helm repo add azure-cloud-controller-manager https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
+helm repo update
+```
+
+3. Create a `values.yaml` file with the following contents to override the default `values.yaml`:
+
+
+
+
+```yaml
+# values.yaml
+infra:
+ clusterName:
+cloudControllerManager:
+ cloudConfigSecretName: azure-cloud-config
+ cloudConfig: null
+ clusterCIDR: null
+ enableDynamicReloading: 'true'
+ configureCloudRoutes: 'false'
+ allocateNodeCidrs: 'false'
+ caCertDir: /etc/ssl
+ enabled: true
+ replicas: 1
+ hostNetworking: true
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ value: 'true'
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+```
+
+
+
+
+
+```yaml
+# values.yaml
+cloudControllerManager:
+ cloudConfigSecretName: azure-cloud-config
+ cloudConfig: null
+ clusterCIDR: null
+ enableDynamicReloading: 'true'
+ configureCloudRoutes: 'false'
+ allocateNodeCidrs: 'false'
+ caCertDir: /etc/ssl
+ enabled: true
+ replicas: 1
+ hostNetworking: true
+ nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+ node-role.kubernetes.io/control-plane: null
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/controlplane
+ value: 'true'
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+infra:
+ clusterName:
+```
+
+
+
+
+4. Install the Helm chart:
+
+```shell
+helm upgrade --install cloud-provider-azure azure-cloud-controller-manager/cloud-provider-azure -n kube-system --values values.yaml
+```
+
+Verify that the Helm chart installed successfully:
+
+```shell
+helm status cloud-provider-azure -n kube-system
+```
+
+5. (Optional) Verify that the cloud controller manager update succeeded:
+
+```shell
+kubectl rollout status deployment -n kube-system cloud-controller-manager
+kubectl rollout status daemonset -n kube-system cloud-node-manager
+```
+
+6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID:
+
+```shell
+kubectl describe nodes | grep "ProviderID"
+```
+
+### Helm Chart Installation from UI
+
+1. Click **☰**, then select the name of the cluster from the left navigation.
+
+2. Select **Apps** > **Repositories**.
+
+3. Click the **Create** button.
+
+4. Enter `https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo` in the **Index URL** field.
+
+5. Select **Apps** > **Charts** from the left navigation and install **cloud-provider-azure** chart.
+
+6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**.
+
+7. Replace `cloudConfig: /etc/kubernetes/azure.json` with the following to read from the Cloud Config Secret and enable dynamic reloading:
+
+```yaml
+ cloudConfigSecretName: azure-cloud-config
+ enableDynamicReloading: 'true'
+```
+
+8. Update the following fields as required:
+
+```yaml
+ allocateNodeCidrs: 'false'
+ configureCloudRoutes: 'false'
+ clusterCIDR: null
+```
+
+
+
+
+9. Rancher-provisioned RKE2 nodes have the selector `node-role.kubernetes.io/control-plane` set to `true`. Update the nodeSelector:
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+```
+
+
+
+
+10. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector:
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/controlplane
+```
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+```
+
+
+
+11. Install the chart and confirm that the cloud controller and cloud node manager deployed successfully:
+
+```shell
+kubectl rollout status deployment -n kube-system cloud-controller-manager
+kubectl rollout status daemonset -n kube-system cloud-node-manager
+```
+
+12. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID:
+
+```shell
+kubectl describe nodes | grep "ProviderID"
+```
+
+### Installing CSI Drivers
+
+Install [Azure Disk CSI driver](https://github.com/kubernetes-sigs/azuredisk-csi-driver) or [Azure File CSI Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver) to access [Azure Disk](https://azure.microsoft.com/en-us/services/storage/disks/) or [Azure File](https://azure.microsoft.com/en-us/services/storage/files/) volumes respectively.
+
+The steps to install the Azure Disk CSI driver are shown below. You can install the Azure File CSI Driver in a similar manner by following the [helm installation documentation](https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/charts/README.md).
+
+:::note Important:
+
+Clusters must be provisioned using `Managed Disk` to use Azure Disk. You can configure this when creating **RKE1 Node Templates** or **RKE2 Machine Pools**.
+
+:::
+
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/charts/README.md) can be found on Github.
+
+1. Add and update the Helm repository:
+
+```shell
+helm repo add azuredisk-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts
+helm repo update azuredisk-csi-driver
+```
+
+2. Install the chart as shown below, updating the `--version` argument as needed. Refer to the full list of latest chart configurations in the [upstream docs](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/charts/README.md#latest-chart-configuration).
+
+```shell
+helm install azuredisk-csi-driver azuredisk-csi-driver/azuredisk-csi-driver --namespace kube-system --version v1.30.1 --set controller.cloudConfigSecretName=azure-cloud-config --set controller.cloudConfigSecretNamespace=kube-system --set controller.runOnControlPlane=true
+```
+
+3. (Optional) Verify that the azuredisk-csi-driver installation succeeded:
+
+```shell
+kubectl --namespace=kube-system get pods --selector="app.kubernetes.io/name=azuredisk-csi-driver" --watch
+```
+
+4. Provision an example Storage Class:
+
+```shell
+cat </resourceGroups/
```
The result should show information about the new service principal:
+
```
{
"appId": "xxxx--xxx",
- "displayName": "",
- "name": "http://",
- "password": "",
- "tenant": ""
+ "displayName": "",
+ "name": "http://",
+ "password": "",
+ "tenant": ""
}
```
-You also need to add roles to the service principal so that it has privileges for communication with the AKS API. It also needs access to create and list virtual networks.
-
-Below is an example command for assigning the Contributor role to a service principal. Contributors can manage anything on AKS but cannot give access to others:
+The following creates a [Resource Group](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-cli) to contain your Azure resources:
```
-az role assignment create \
- --assignee $appId \
- --scope /subscriptions/$/resourceGroups/$ \
- --role Contributor
-```
-
-You can also create the service principal and give it Contributor privileges by combining the two commands into one. In this command, the scope needs to provide a full path to an Azure resource:
-
-```
-az ad sp create-for-rbac \
- --scope /subscriptions/$/resourceGroups/$ \
- --role Contributor
-```
-
-Create the Resource Group by running this command:
-
-```
-az group create --location AZURE_LOCATION_NAME --resource-group AZURE_RESOURCE_GROUP_NAME
+az group create --location <AZURE_LOCATION_NAME> --resource-group <AZURE_RESOURCE_GROUP_NAME>
```
### Setting Up the Service Principal from the Azure Portal
-You can also follow these instructions to set up a service principal and give it role-based access from the Azure Portal.
+Follow these instructions to set up a service principal and give it role-based access from the Azure Portal.
1. Go to the Microsoft Azure Portal [home page](https://portal.azure.com).
-
1. Click **Azure Active Directory**.
1. Click **App registrations**.
1. Click **New registration**.
-1. Enter a name. This will be the name of your service principal.
+1. Enter a name for your service principal.
1. Optional: Choose which accounts can use the service principal.
1. Click **Register**.
1. You should now see the name of your service principal under **Azure Active Directory > App registrations**.
@@ -101,7 +84,7 @@ To give role-based access to your service principal,
**Result:** Your service principal now has access to AKS.
-## 1. Create the AKS Cloud Credentials
+## Create the AKS Cloud Credentials
1. In the Rancher UI, click **☰ > Cluster Management**.
1. Click **Cloud Credentials**.
@@ -110,7 +93,7 @@ To give role-based access to your service principal,
1. Fill out the form. For help with filling out the form, see the [configuration reference.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md#cloud-credentials)
1. Click **Create**.
-## 2. Create the AKS Cluster
+## Create the AKS Cluster
Use Rancher to set up and configure your Kubernetes cluster.
@@ -124,7 +107,8 @@ Use Rancher to set up and configure your Kubernetes cluster.
You can access your cluster after its state is updated to **Active**.
-## Role-based Access Control
+## Configure Role-based Access Control
+
When provisioning an AKS cluster in the Rancher UI, RBAC is not configurable because it is required to be enabled.
RBAC is required for AKS clusters that are registered or imported into Rancher.
@@ -135,8 +119,8 @@ Assign the Rancher AKSv2 role to the service principal with the Azure Command Li
```
az role assignment create \
---assignee CLIENT_ID \
---scope "/subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP_NAME" \
+--assignee <CLIENT_ID> \
+--scope "/subscriptions/<SUBSCRIPTION_ID>/resourceGroups/<RESOURCE_GROUP_NAME>" \
--role "Rancher AKSv2"
```
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/eks.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/eks.md
index e6ecf6090ec..589668a2ac5 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/eks.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/eks.md
@@ -95,10 +95,15 @@ This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-ra
These are the minimum set of permissions necessary to access the full functionality of Rancher's EKS driver. You'll need additional permissions for Rancher to provision the `Service Role` and `VPC` resources. If you create these resources **before** you create the cluster, they'll be available when you configure the cluster.
+:::note
+In EKS v1.23 and above, you must use the out-of-tree drivers for EBS-backed volumes. You need [specific permissions](#ebs-csi-driver-addon-permissions) to enable this add-on.
+:::
+
Resource | Description
---------|------------
Service Role | Provides permissions that allow Kubernetes to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions](#service-role-permissions).
VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions](#vpc-permissions).
+EBS CSI Driver add-on | Provides permissions that allow Kubernetes to interact with EBS and configure the cluster to enable the add-on (required for EKS v1.23 and above). Rancher can install the add-on with the following [EBS CSI Driver addon Permissions](#ebs-csi-driver-addon-permissions).
Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher.
@@ -129,6 +134,7 @@ Resource targeting uses `*` as the ARN of many of the resources created cannot b
"ec2:DescribeAvailabilityZones",
"ec2:DescribeAccountAttributes",
"ec2:DeleteTags",
+ "ec2:DeleteLaunchTemplateVersions",
"ec2:DeleteLaunchTemplate",
"ec2:DeleteSecurityGroup",
"ec2:DeleteKeyPair",
@@ -314,6 +320,43 @@ These are permissions that are needed by Rancher to create a Virtual Private Clo
}
```
+### EBS CSI Driver addon Permissions
+
+The following are the required permissions for installing the Amazon EBS CSI Driver add-on.
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "iam:GetRole",
+ "eks:DescribeAddonConfiguration",
+ "eks:UpdateAddon",
+ "eks:ListAddons",
+ "iam:CreateRole",
+ "iam:AttachRolePolicy",
+ "eks:DescribeAddon",
+ "iam:CreateOpenIDConnectProvider",
+ "iam:PassRole",
+ "eks:DescribeIdentityProviderConfig",
+ "eks:DeleteAddon",
+ "iam:ListOpenIDConnectProviders",
+ "iam:ListAttachedRolePolicies",
+ "eks:CreateAddon",
+ "eks:DescribeCluster",
+ "eks:DescribeAddonVersions",
+ "sts:AssumeRoleWithWebIdentity",
+ "eks:AssociateIdentityProviderConfig",
+ "eks:ListIdentityProviderConfigs"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
+```
+
## Syncing
The EKS provisioner can synchronize the state of an EKS cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md)
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md
index fb19bea8379..43951fe694b 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md
@@ -46,7 +46,7 @@ If you need to create a private registry, refer to the documentation pages for y
:::
1. Select a namespace for the registry.
-1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password.
+1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use Docker Hub, provide your Docker Hub username and password.
1. Click **Save**.
**Result:**
@@ -89,7 +89,7 @@ Before v2.6, secrets were required to be in a project scope. Projects are no lon
:::
1. Select a namespace for the registry.
-1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password.
+1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use Docker Hub, provide your Docker Hub username and password.
1. Click **Save**.
**Result:**
diff --git a/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/shutdown-vm.md b/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/shutdown-vm.md
new file mode 100644
index 00000000000..78c437eed81
--- /dev/null
+++ b/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/shutdown-vm.md
@@ -0,0 +1,65 @@
+---
+title: Graceful Shutdown for VMware vSphere Virtual Machines
+---
+
+
+
+
+
+In Rancher v2.8.3 and later, you can configure the graceful shutdown of virtual machines (VMs) for VMware vSphere node driver clusters. Graceful shutdown introduces a delay before the VM is forcibly deleted, which allows time for terminating any running processes and open connections.
+
+In RKE2/K3s, you can set up graceful shutdown when you create the cluster, or edit the cluster configuration to add it afterward.
+
+In RKE, you can edit node templates to achieve similar results.
+
+:::note
+
+Since Rancher can't detect the platform of an imported cluster, you cannot enable graceful shutdown on VMware vSphere clusters you have imported.
+
+:::
+
+## Enable Graceful Shutdown During VMware vSphere Cluster Creation
+
+
+
+
+In RKE2/K3s, you can configure new VMware vSphere clusters with graceful shutdown for VMs:
+
+1. Click **☰ > Cluster Management**.
+1. Click **Create** and select **VMware vSphere** to provision a new cluster.
+1. Under **Machine Pools > Scheduling**, in the **Graceful Shutdown Timeout** field, enter an integer value greater than 0. The value you enter is the amount of time in seconds Rancher waits before deleting VMs on the cluster. If the value is set to `0`, graceful shutdown is disabled.
+
+
+
+
+In RKE, you can't directly configure a new cluster with graceful shutdown. However, you can configure node templates which automatically create node pools with graceful shutdown enabled. The node template can then be used to provision new VMware vSphere clusters that have a graceful shutdown delay.
+
+1. Click **☰ > Cluster Management**.
+1. From the left navigation, select **RKE1 Configuration > Node Templates**.
+1. Click **Add Template** and select **vSphere** to create a node template.
+1. Under **2. Scheduling**, in the **Graceful Shutdown Timeout** field, enter an integer value greater than 0. The value you enter is the amount of time in seconds Rancher waits before deleting VMs on the cluster. If the value is set to `0`, graceful shutdown is disabled.
+
+When you [use the newly-created node template to create node pools](../use-new-nodes-in-an-infra-provider.md), the VMs shut down gracefully according to the **Graceful Shutdown Timeout** value you have set.
+
+
+
+
+## Enable Graceful Shutdown in Existing RKE2/K3s Clusters
+
+In RKE2/K3s, you can edit the configuration of an existing VMware vSphere cluster to enable graceful shutdown, which adds a delay before deleting VMs.
+
+1. Click **☰ > Cluster Management**.
+1. On the **Clusters** page, find the VMware vSphere hosted cluster you want to edit. Click **⋮** at the end of the row associated with the cluster. Select **Edit Config**.
+1. Under **Machine Pools > Scheduling**, in the **Graceful Shutdown Timeout** field, enter an integer value greater than 0. The value you enter is the amount of time in seconds Rancher waits before deleting VMs on the cluster. If the value is set to `0`, graceful shutdown is disabled.
+
+## Enable Graceful Shutdown in Existing RKE Clusters
+
+In RKE, you can't directly edit an existing cluster's configuration to add graceful shutdown to existing VMware vSphere clusters. However, you can edit the configuration of existing node templates. As noted in [Updating a Node Template](../../../../../reference-guides/user-settings/manage-node-templates.md#updating-a-node-template), all node pools using the node template automatically use the updated information when new nodes are added to the cluster.
+
+To edit an existing node template to enable graceful shutdown:
+
+1. Click **☰ > Cluster Management**.
+1. From the left navigation, select **RKE1 Configuration > Node Templates**.
+1. Find the VMware vSphere node template you want to edit. Click **⋮** at the end of the row associated with the template. Select **Edit**.
+1. Under **2. Scheduling**, in the **Graceful Shutdown Timeout** field, enter an integer value greater than 0. The value you enter is the amount of time in seconds Rancher waits before deleting VMs on the cluster. If the value is set to `0`, graceful shutdown is disabled.
+1. Click **Save**.
diff --git a/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/vsphere.md b/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/vsphere.md
index ed7dbb7cd35..553905b2a0b 100644
--- a/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/vsphere.md
+++ b/docs/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/vsphere.md
@@ -15,9 +15,9 @@ Rancher can provision nodes in vSphere and install Kubernetes on them. When crea
A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role.
-## VMware vSphere Enhancements in Rancher v2.3
+## VMware vSphere Enhancements
-The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements:
+The vSphere node templates allow you to bring cloud operations on-premises with the following enhancements:
### Self-healing Node Pools
@@ -39,12 +39,6 @@ For the fields to be populated, your setup needs to fulfill the [prerequisites.]
You can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html)
-### Video Walkthrough of v2.3.3 Node Template Features
-
-In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters.
-
-
-
## Creating a VMware vSphere Cluster
In [this section,](provision-kubernetes-clusters-in-vsphere.md) you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere.
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
index 8db87b8b982..7a6a5adea56 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
@@ -19,11 +19,11 @@ The kubeconfig file and its contents are specific to each cluster. It can be dow
1. Find the cluster whose kubeconfig you want to download, and select **⁝** at the end of the row.
1. Select **Download KubeConfig** from the submenu.
-You will need a separate kubeconfig file for each cluster that you have access to in Rancher.
+You need a separate kubeconfig file for each cluster that you have access to in Rancher.
-After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster.
+After you download the kubeconfig file, you are able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster.
-If admins have [kubeconfig token generation turned off](../../../../reference-guides/about-the-api/api-tokens.md#disable-tokens-in-generated-kubeconfigs), the kubeconfig file requires [rancher cli](./authorized-cluster-endpoint.md) to be present in your PATH.
+If admins have [kubeconfig token generation turned off](../../../../api/api-tokens.md#disable-tokens-in-generated-kubeconfigs), the kubeconfig file requires that the [Rancher CLI](../../../../reference-guides/cli-with-rancher/rancher-cli.md) to be present in your PATH.
### Two Authentication Methods for RKE Clusters
@@ -36,7 +36,7 @@ For RKE clusters, the kubeconfig file allows you to be authenticated in two ways
This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher.
-To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster)
+To use the authorized cluster endpoint, you need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster)
These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters.
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md b/docs/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
index 736bc664d8c..482f5bf22ef 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
@@ -122,7 +122,7 @@ Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
## Cleaning up Nodes
-
+
Before you run the following commands, first remove the node through the Rancher UI.
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
index c40d3000816..0c89ef1162a 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
@@ -19,7 +19,7 @@ To provision new storage for your workloads, follow these steps:
1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage)
2. [Use the Storage Class for Pods Deployed with a StatefulSet.](#2-use-the-storage-class-for-pods-deployed-with-a-statefulset)
-### Prerequisites
+## Prerequisites
- To set up persistent storage, the `Manage Volumes` [role](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required.
- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider.
@@ -42,7 +42,7 @@ hostPath | `host-path`
To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.](../../../../advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)
-### 1. Add a storage class and configure it to use your storage
+## 1. Add a storage class and configure it to use your storage
These steps describe how to set up a storage class at the cluster level.
@@ -59,7 +59,7 @@ These steps describe how to set up a storage class at the cluster level.
For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters).
-### 2. Use the Storage Class for Pods Deployed with a StatefulSet
+## 2. Use the Storage Class for Pods Deployed with a StatefulSet
StatefulSets manage the deployment and scaling of Pods while maintaining a sticky identity for each Pod. In this StatefulSet, we will configure a VolumeClaimTemplate. Each Pod managed by the StatefulSet will be deployed with a PersistentVolumeClaim based on this VolumeClaimTemplate. The PersistentVolumeClaim will refer to the StorageClass that we created. Therefore, when each Pod managed by the StatefulSet is deployed, it will be bound to dynamically provisioned storage using the StorageClass defined in its PersistentVolumeClaim.
@@ -70,7 +70,7 @@ StatefulSets manage the deployment and scaling of Pods while maintaining a stick
1. Click **StatefulSet**.
1. In the **Volume Claim Templates** tab, click **Add Claim Template**.
1. Enter a name for the persistent volume.
-1. In the **StorageClass* field, select the StorageClass that will dynamically provision storage for pods managed by this StatefulSet.
+1. In the **StorageClass** field, select the StorageClass that will dynamically provision storage for pods managed by this StatefulSet.
1. In the **Mount Point** field, enter the path that the workload will use to access the volume.
1. Click **Launch**.
@@ -84,8 +84,8 @@ To attach the PVC to an existing workload,
1. Go to the workload that will use storage provisioned with the StorageClass that you created and click **⋮ > Edit Config**.
1. In the **Volume Claim Templates** section, click **Add Claim Template**.
1. Enter a persistent volume name.
-1. In the **StorageClass* field, select the StorageClass that will dynamically provision storage for pods managed by this StatefulSet.
+1. In the **StorageClass** field, select the StorageClass that will dynamically provision storage for pods managed by this StatefulSet.
1. In the **Mount Point** field, enter the path that the workload will use to access the volume.
1. Click **Save**.
-**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage.
\ No newline at end of file
+**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage.
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
index 4be791f5cc3..194d8284b3f 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
@@ -20,12 +20,12 @@ To set up storage, follow these steps:
2. [Add a PersistentVolume that refers to the persistent storage.](#2-add-a-persistentvolume-that-refers-to-the-persistent-storage)
3. [Use the Storage Class for Pods Deployed with a StatefulSet.](#3-use-the-storage-class-for-pods-deployed-with-a-statefulset)
-### Prerequisites
+## Prerequisites
- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference)
- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider.
-### 1. Set up persistent storage
+## 1. Set up persistent storage
Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned.
@@ -33,7 +33,7 @@ The steps to set up a persistent storage device will differ based on your infras
If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [Cloud Native Storage with Longhorn](../../../../../integrations-in-rancher/longhorn/longhorn.md).
-### 2. Add a PersistentVolume that refers to the persistent storage
+## 2. Add a PersistentVolume that refers to the persistent storage
These steps describe how to set up a PersistentVolume at the cluster level in Kubernetes.
@@ -52,7 +52,7 @@ These steps describe how to set up a PersistentVolume at the cluster level in Ku
**Result:** Your new persistent volume is created.
-### 3. Use the Storage Class for Pods Deployed with a StatefulSet
+## 3. Use the Storage Class for Pods Deployed with a StatefulSet
StatefulSets manage the deployment and scaling of Pods while maintaining a sticky identity for each Pod. In this StatefulSet, we will configure a VolumeClaimTemplate. Each Pod managed by the StatefulSet will be deployed with a PersistentVolumeClaim based on this VolumeClaimTemplate. The PersistentVolumeClaim will refer to the PersistentVolume that we created. Therefore, when each Pod managed by the StatefulSet is deployed, it will be bound a PersistentVolume as defined in its PersistentVolumeClaim.
@@ -86,4 +86,4 @@ The following steps describe how to assign persistent storage to an existing wor
1. In the **Mount Point** field, enter the path that the workload will use to access the volume.
1. Click **Launch**.
-**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC.
\ No newline at end of file
+**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC.
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/docs/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
index 7daaab8504b..6a95a95b3ae 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
@@ -304,7 +304,7 @@ cloud-provider|-|Cloud provider type|
|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned|
|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: `::`|
|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. A definition is expressed `:[[=]]`|
-|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]|
+|estimator|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]|
|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`|
|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down|
|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down|
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md b/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
index fbbe7813b4e..b8c2133a475 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
@@ -76,7 +76,7 @@ To manage individual nodes, browse to the cluster that you want to manage and th
## Viewing a Node in the Rancher API
-Select this option to view the node's [API endpoints](../../../reference-guides/about-the-api/about-the-api.md).
+Select this option to view the node's [API endpoints](../../../api/quickstart.md).
## Deleting a Node
@@ -100,7 +100,7 @@ For [nodes hosted by an infrastructure provider](../launch-kubernetes-with-ranch
1. In the upper left corner, click **☰ > Cluster Management**.
1. On the **Clusters** page, go to the cluster where you want to SSH into a node and click the name of the cluster.
-1. On the **Machine Pools** tab, find the node that you want to remote into and click **⋮ > Download SSH Key**. A ZIP file containing files used for SSH will be downloaded.
+1. On the **Machine Pools** tab, find the node that you want to remote into and click **⋮ > Download SSH Key**. A ZIP file containing files used for SSH is then downloaded.
1. Extract the ZIP file to any location.
1. Open Terminal. Change your location to the extracted ZIP file.
1. Enter the following command:
@@ -111,13 +111,13 @@ For [nodes hosted by an infrastructure provider](../launch-kubernetes-with-ranch
## Cordoning a Node
-_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it.
+_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it.
## Draining a Node
_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption.
-- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod.
+- For pods with a replica set, the pod is replaced by a new pod that is scheduled to a new node. Additionally, if the pod is part of a service, then clients are automatically redirected to the new pod.
- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it.
@@ -127,20 +127,21 @@ However, you can override the conditions draining when you initiate the drain. Y
### Aggressive and Safe Draining Options
-When you configure the upgrade strategy for the cluster, you will be able to enable node draining. If node draining is enabled, you will be able to configure how pods are deleted and rescheduled.
+When you configure the upgrade strategy for the cluster, you can enable node draining. If node draining is enabled, you are able to configure how pods are deleted and rescheduled.
- **Aggressive Mode**
In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods.
- Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods.
+ Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir is deleted once the pod is removed from the node. Choosing aggressive mode deletes these pods.
- **Safe Mode**
- If a node has standalone pods or ephemeral data it will be cordoned but not drained.
+ If a node has stand-alone pods or ephemeral data it is cordoned but not drained.
+
### Grace Period
-The timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used.
+The timeout given to each pod for cleaning things up so they have a chance to exit gracefully. For example, pods might need to finish any outstanding requests, roll back transactions, or save state to external storage. If negative, the default value specified in the pod is used.
### Timeout
@@ -156,17 +157,17 @@ The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was n
If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node.
-If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`.
+If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which then stops the drain process and changes the node's state to `cordoned`.
-Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node.
+Once drain successfully completes, the node is in a state of `drained`. You can then power off or delete the node.
**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/).
## Labeling a Node to be Ignored by Rancher
-Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster.
+Certain solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster.
-Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI.
+Since the node never finishes registering, it is always shown as unhealthy in the Rancher UI.
In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing.
@@ -181,16 +182,16 @@ There is an [open issue](https://github.com/rancher/rancher/issues/24172) in whi
### Labeling Nodes to be Ignored with kubectl
-To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label:
+To add a node that is ignored by Rancher, use `kubectl` to create a node that has the following label:
```
cattle.rancher.io/node-status: ignore
```
-**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`.
+**Result:** If you add the node to a cluster, Rancher skips syncing with this node. The node can still be part of the cluster and can be listed with `kubectl`.
-If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI.
+If the label is added before the node is added to the cluster, the node is not shown in the Rancher UI.
-If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI.
+If the label is added after the node is added to a Rancher cluster, the node is not removed from the UI.
-If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings in the Rancher API under `v3/settings/ignore-node-name`.
+If you delete the node from the Rancher server using the Rancher UI or API, the node is not removed from the cluster if the `nodeName` is listed in the Rancher settings in the Rancher API under `v3/settings/ignore-node-name`.
diff --git a/docs/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md b/docs/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
index 09c74502119..a333b7b4dc4 100644
--- a/docs/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
+++ b/docs/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
@@ -173,12 +173,12 @@ To add members:
### 4. Optional: Add Resource Quotas
-Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas](projects-and-namespaces.md).
+Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md).
To add a resource quota,
1. In the **Resource Quotas** tab, click **Add Resource**.
-1. Select a **Resource Type**. For more information, see [Resource Quotas.](projects-and-namespaces.md).
+1. Select a **Resource Type**. For more information, see [Resource Quotas.](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md).
1. Enter values for the **Project Limit** and the **Namespace Default Limit**.
1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)
1. Click **Create**.
diff --git a/docs/how-to-guides/new-user-guides/manage-namespaces.md b/docs/how-to-guides/new-user-guides/manage-namespaces.md
index 48ae6879c01..5d92c7331a2 100644
--- a/docs/how-to-guides/new-user-guides/manage-namespaces.md
+++ b/docs/how-to-guides/new-user-guides/manage-namespaces.md
@@ -25,11 +25,11 @@ To manage permissions in a vanilla Kubernetes cluster, cluster admins configure
:::note
-If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace.
+If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](#creating-namespaces) to ensure that you will have permission to access the namespace.
:::
-### Creating Namespaces
+## Creating Namespaces
Create a new namespace to isolate apps and resources in a project.
@@ -50,7 +50,7 @@ When working with project resources that you can assign to a namespace (i.e., [w
**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace.
-### Moving Namespaces to Another Project
+## Moving Namespaces to Another Project
Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application.
@@ -71,7 +71,7 @@ Cluster admins and members may occasionally need to move a namespace to another
**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attaches resources from the new project.
-### Editing Namespace Resource Quotas
+## Editing Namespace Resource Quotas
You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources.
diff --git a/docs/integrations-in-rancher/cis-scans/configuration-reference.md b/docs/integrations-in-rancher/cis-scans/configuration-reference.md
index 0403956be56..3394bc2702b 100644
--- a/docs/integrations-in-rancher/cis-scans/configuration-reference.md
+++ b/docs/integrations-in-rancher/cis-scans/configuration-reference.md
@@ -14,7 +14,7 @@ To configure the custom resources, go to the **Cluster Dashboard** To configure
1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**.
1. In the left navigation bar, click **CIS Benchmark**.
-### Scans
+## Scans
A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed.
@@ -31,7 +31,7 @@ spec:
scanProfileName: rke-profile-hardened
```
-### Profiles
+## Profiles
A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark.
@@ -66,7 +66,7 @@ spec:
- "1.1.21"
```
-### Benchmark Versions
+## Benchmark Versions
A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark.
diff --git a/docs/integrations-in-rancher/cis-scans/custom-benchmark.md b/docs/integrations-in-rancher/cis-scans/custom-benchmark.md
index 47853e45c14..4ec353cc60b 100644
--- a/docs/integrations-in-rancher/cis-scans/custom-benchmark.md
+++ b/docs/integrations-in-rancher/cis-scans/custom-benchmark.md
@@ -17,7 +17,7 @@ When a cluster scan is run, you need to select a Profile which points to a speci
Follow all the steps below to add a custom Benchmark Version and run a scan using it.
-### 1. Prepare the Custom Benchmark Version ConfigMap
+## 1. Prepare the Custom Benchmark Version ConfigMap
To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan.
@@ -42,7 +42,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom
kubectl create configmap -n foo --from-file=
```
-### 2. Add a Custom Benchmark Version to a Cluster
+## 2. Add a Custom Benchmark Version to a Cluster
1. In the upper left corner, click **☰ > Cluster Management**.
1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**.
@@ -54,7 +54,7 @@ To prepare a custom benchmark version ConfigMap, suppose we want to add a custom
1. Add the minimum and maximum Kubernetes version limits applicable, if any.
1. Click **Create**.
-### 3. Create a New Profile for the Custom Benchmark Version
+## 3. Create a New Profile for the Custom Benchmark Version
To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version.
@@ -66,7 +66,7 @@ To run a scan using your custom benchmark version, you need to add a new Profile
1. Choose the Benchmark Version from the dropdown.
1. Click **Create**.
-### 4. Run a Scan Using the Custom Benchmark Version
+## 4. Run a Scan Using the Custom Benchmark Version
Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version.
diff --git a/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md b/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
index 9669219ec33..e2199645e4d 100644
--- a/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
+++ b/docs/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
@@ -18,11 +18,10 @@ In order to deploy and run the adapter successfully, you need to ensure its vers
:::
| Rancher Version | Adapter Version |
-|-----------------|:----------------:|
-| v2.8.0 | v103.0.0+up3.0.0 |
-| v2.8.1 | v103.0.0+up3.0.0 |
-| v2.8.2 | v103.0.0+up3.0.0 |
-| v2.8.3 | v103.0.1+up3.0.1 |
+|-----------------|------------------|
+| v2.9.2 | v104.0.0+up4.0.0 |
+| v2.9.1 | v104.0.0+up4.0.0 |
+| v2.9.0 | v104.0.0+up4.0.0 |
### 1. Gain Access to the Local Cluster
diff --git a/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md b/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md
index 6eecac1132a..4dbda92bf19 100644
--- a/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md
+++ b/docs/integrations-in-rancher/cloud-marketplace/supportconfig.md
@@ -1,5 +1,5 @@
---
-title: Supportconfig bundle
+title: Supportconfig Bundle
---
@@ -12,7 +12,7 @@ These bundles can be created through Rancher or through direct access to the clu
> **Note:** Only admin users can generate/download supportconfig bundles, regardless of method.
-### Accessing through Rancher
+## Accessing Through Rancher
First, click on the hamburger menu. Then click the `Get Support` button.
@@ -24,7 +24,7 @@ In the next page, click on the `Generate Support Config` button.

-### Accessing without rancher
+## Accessing Without Rancher
First, generate a kubeconfig for the cluster that Rancher is installed on.
diff --git a/docs/integrations-in-rancher/cluster-api/cluster-api.md b/docs/integrations-in-rancher/cluster-api/cluster-api.md
index 32e124ba058..3a3bf0f3302 100644
--- a/docs/integrations-in-rancher/cluster-api/cluster-api.md
+++ b/docs/integrations-in-rancher/cluster-api/cluster-api.md
@@ -6,7 +6,7 @@ title: Cluster API (CAPI) with Rancher Turtles
-[Rancher Turtles](https://turtles.docs.rancher.com/) is a [Rancher extension](../rancher-extensions.md) that manages the lifecycle of provisioned Kubernetes clusters, by providing integration between your Cluster API (CAPI) and Rancher. With Rancher Turtles, you can:
+[Rancher Turtles](https://turtles.docs.rancher.com/) is a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/#operators-in-kubernetes) that manages the lifecycle of provisioned Kubernetes clusters, by providing integration between your Cluster API (CAPI) and Rancher. With Rancher Turtles, you can:
- Import CAPI clusters into Rancher, by installing the Rancher Cluster Agent in CAPI provisioned clusters.
- Configure the [CAPI Operator](https://turtles.docs.rancher.com/reference-guides/rancher-turtles-chart/values#cluster-api-operator-values).
diff --git a/docs/integrations-in-rancher/cluster-api/overview.md b/docs/integrations-in-rancher/cluster-api/overview.md
index a2878c3ab04..e2e95fb3137 100644
--- a/docs/integrations-in-rancher/cluster-api/overview.md
+++ b/docs/integrations-in-rancher/cluster-api/overview.md
@@ -185,7 +185,7 @@ For detailed information on the values supported by the chart and their usage, r
:::note
-Remember that if you opt for this installation option, you must manage the CAPI Operator installation yourself. You can follow the [CAPI Operator guide](https://turtles.docs.rancher.com/tasks/capi-operator/intro) in the Rancher Turtles documentation for assistance.
+Remember that if you opt for this installation option, you must manage the CAPI Operator installation yourself. You can follow the [CAPI Operator guide](https://turtles.docs.rancher.com/contributing/install_capi_operator) in the Rancher Turtles documentation for assistance.
:::
diff --git a/docs/integrations-in-rancher/fleet/overview.md b/docs/integrations-in-rancher/fleet/overview.md
index 190f9b8906f..c2787aaa421 100644
--- a/docs/integrations-in-rancher/fleet/overview.md
+++ b/docs/integrations-in-rancher/fleet/overview.md
@@ -63,6 +63,8 @@ The Helm chart in the git repository must include its dependencies in the charts
- **Temporary Workaround**: By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. To modify resourceSet to include extra resources you want to backup, refer to docs [here](https://github.com/rancher/backup-restore-operator#user-flow).
+- **Debug logging**: To enable debug logging of Fleet components, create a new **fleet** entry in the existing **rancher-config** ConfigMap in the **cattle-system** namespace with the value `{"debug": 1, "debugLevel": 1}`. The Fleet application restarts after you save the ConfigMap.
+
## Documentation
-The Fleet documentation is at https://fleet.rancher.io/.
\ No newline at end of file
+See the [official Fleet documentation](https://fleet.rancher.io/) to learn more.
diff --git a/docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md b/docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md
index 1570da5b448..6261f3820b2 100644
--- a/docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md
+++ b/docs/integrations-in-rancher/fleet/use-fleet-behind-a-proxy.md
@@ -30,7 +30,20 @@ When adding Fleet agent environment variables for the proxy, replace
## Setting Environment Variables in the Rancher UI
-To add the environment variable to an existing cluster,
+To add the environment variable to an existing cluster:
+
+
+
+
+1. Click **☰ > Cluster Management**.
+1. Go to the cluster where you want to add environment variables and click **⋮ > Edit Config**.
+1. Click **Agent Environment Vars** under **Cluster configuration**.
+1. Click **Add**.
+1. Enter the [required environment variables](#required-environment-variables).
+1. Click **Save**.
+
+
+
1. Click **☰ > Cluster Management**.
1. Go to the cluster where you want to add environment variables and click **⋮ > Edit Config**.
@@ -39,6 +52,9 @@ To add the environment variable to an existing cluster,
1. Enter the [required environment variables](#required-environment-variables).
1. Click **Save**.
+
+
+
**Result:** The Fleet agent works behind a proxy.
## Setting Environment Variables on Private Nodes
@@ -55,4 +71,4 @@ export HTTP_PROXY=http://${proxy_private_ip}:8888
export HTTPS_PROXY=http://${proxy_private_ip}:8888
export NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
-```
\ No newline at end of file
+```
diff --git a/docs/integrations-in-rancher/harvester/overview.md b/docs/integrations-in-rancher/harvester/overview.md
index d22afe15965..edd54a6f557 100644
--- a/docs/integrations-in-rancher/harvester/overview.md
+++ b/docs/integrations-in-rancher/harvester/overview.md
@@ -8,7 +8,7 @@ title: Overview
Introduced in Rancher v2.6.1, [Harvester](https://docs.harvesterhci.io/) is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. Although Harvester operates using Kubernetes, it does not require users to know Kubernetes concepts, making it a more user-friendly application.
-### Feature Flag
+## Feature Flag
The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md) for more information on feature flags in Rancher.
@@ -22,7 +22,7 @@ To navigate to the Harvester cluster, click **☰ > Virtualization Management**.
* Users may import a Harvester cluster only on the Virtualization Management page. Importing a cluster on the Cluster Management page is not supported, and a warning will advise you to return to the VM page to do so.
-### Harvester Node Driver
+## Harvester Node Driver
The [Harvester node driver](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/) is generally available for RKE and RKE2 options in Rancher. The node driver is available whether or not the Harvester feature flag is enabled. Note that the node driver is off by default. Users may create RKE or RKE2 clusters on Harvester only from the Cluster Management page.
@@ -30,7 +30,7 @@ Harvester allows `.ISO` images to be uploaded and displayed through the Harveste
See [Provisioning Drivers](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher.
-### Port Requirements
+## Port Requirements
The port requirements for the Harvester cluster can be found [here](https://docs.harvesterhci.io/v1.1/install/requirements#networking).
diff --git a/docs/integrations-in-rancher/integrations-in-rancher.md b/docs/integrations-in-rancher/integrations-in-rancher.md
new file mode 100644
index 00000000000..08b0e878e6f
--- /dev/null
+++ b/docs/integrations-in-rancher/integrations-in-rancher.md
@@ -0,0 +1,18 @@
+---
+title: Integrations in Rancher
+---
+
+
+
+
+
+Prime is the Rancher ecosystem’s enterprise offering, with additional security, extended lifecycles, and access to Prime-exclusive documentation. Rancher Prime installation assets are hosted on a trusted SUSE registry, owned and managed by Rancher. The trusted Prime registry includes only stable releases that have been community-tested.
+
+Prime also offers options for production support, as well as add-ons to your subscription that tailor to your commercial needs.
+
+To learn more and get started with Rancher Prime, please visit [this page](https://www.rancher.com/quick-start).
+
+import DocCardList from '@theme/DocCardList';
+import { useCurrentSidebarCategory } from '@docusaurus/theme-common/internal';
+
+
diff --git a/docs/integrations-in-rancher/integrations-in-rancher.mdx b/docs/integrations-in-rancher/integrations-in-rancher.mdx
deleted file mode 100644
index 5f75b398ab7..00000000000
--- a/docs/integrations-in-rancher/integrations-in-rancher.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: Integrations in Rancher
----
-
-
-
-
-
-import {Card, CardSection} from '@site/src/components/CardComponents';
-import {RocketRegular} from '@fluentui/react-icons';
-
-Prime is the Rancher ecosystem’s enterprise offering, with additional security, extended lifecycles, and access to Prime-exclusive documentation. Rancher Prime installation assets are hosted on a trusted SUSE registry, owned and managed by Rancher. The trusted Prime registry includes only stable releases that have been community-tested.
-
-Prime also offers options for production support, as well as add-ons to your subscription that tailor to your commercial needs.
-
-To learn more and get started with Rancher Prime, please visit [this page](https://www.rancher.com/quick-start).
-
- }
->
-
-
-
-
-
-
-
-
-
diff --git a/docs/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/docs/integrations-in-rancher/istio/cpu-and-memory-allocations.md
index d61b13089cd..2566ee81668 100644
--- a/docs/integrations-in-rancher/istio/cpu-and-memory-allocations.md
+++ b/docs/integrations-in-rancher/istio/cpu-and-memory-allocations.md
@@ -45,7 +45,7 @@ To configure the resources allocated to an Istio component,
1. In the left navigation bar, click **Apps**.
1. Click **Installed Apps**.
1. Go to the `istio-system` namespace. In one of the Istio workloads, such as `rancher-istio`, click **⋮ > Edit/Upgrade**.
-1. Click **Upgrade** to edit the base components via changes to the values.yaml or add an [overlay file](configuration-options/configuration-options.md#overlay-file). For more information about editing the overlay file, see [this section.](cpu-and-memory-allocations.md#editing-the-overlay-file)
+1. Click **Upgrade** to edit the base components via changes to the values.yaml or add an [overlay file](configuration-options/configuration-options.md#overlay-file). For more information about editing the overlay file, see [this section.](#editing-the-overlay-file)
1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations.
1. Click **Upgrade** to roll out the changes.
diff --git a/docs/integrations-in-rancher/istio/istio.md b/docs/integrations-in-rancher/istio/istio.md
index 0f865ef5363..ec3cdaa0ba4 100644
--- a/docs/integrations-in-rancher/istio/istio.md
+++ b/docs/integrations-in-rancher/istio/istio.md
@@ -43,10 +43,14 @@ It also includes the following:
### Kiali
-Kiali is a comprehensive visualization aid used for graphing traffic flow throughout the service mesh. It allows you to see how they are connected, including the traffic rates and latencies between them.
+[Kiali](https://kiali.io/) is a comprehensive visualization aid used for graphing traffic flow throughout the service mesh. It allows you to see how they are connected, including the traffic rates and latencies between them.
You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component.
+:::note
+For Istio installations version `103.1.0+up1.19.6` and later, Kiali uses a token value for its authentication strategy. The name of the Kiali service account in Rancher is `kiali`. Use this name if you are writing commands that require you to enter the name of the Kiali service account (for example, if you are trying to generate or retrieve a session token). For more information, refer to the [Kiali token authentication FAQ](https://kiali.io/docs/faq/authentication/).
+:::
+
### Jaeger
Our Istio installer includes a quick-start, all-in-one installation of [Jaeger,](https://www.jaegertracing.io/) a tool used for tracing distributed systems.
@@ -71,6 +75,10 @@ To remove Istio components from a cluster, namespace, or workload, refer to the
> By default, only cluster-admins have access to Kiali. For instructions on how to allow admin, edit or views roles to access them, see [this section.](rbac-for-istio.md)
+:::note
+For Istio installations version `103.1.0+up1.19.6` and later, Kiali uses a token value for its authentication strategy. The name of the Kiali service account in Rancher is `kiali`. Use this name if you are writing commands that require you to enter the name of the Kiali service account (for example, if you are trying to generate or retrieve a session token). For more information, refer to the [Kiali token authentication FAQ](https://kiali.io/docs/faq/authentication/).
+:::
+
After Istio is set up in a cluster, Grafana, Prometheus, and Kiali are available in the Rancher UI.
To access the Grafana and Prometheus visualizations,
diff --git a/docs/integrations-in-rancher/logging/logging-architecture.md b/docs/integrations-in-rancher/logging/logging-architecture.md
index f4b716a6c2e..ec56b8d1ef6 100644
--- a/docs/integrations-in-rancher/logging/logging-architecture.md
+++ b/docs/integrations-in-rancher/logging/logging-architecture.md
@@ -10,7 +10,7 @@ This section summarizes the architecture of the Rancher logging application.
For more details about how the Logging operator works, see the [official documentation.](https://kube-logging.github.io/docs/#architecture)
-### How the Logging Operator Works
+## How the Logging Operator Works
The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system.
diff --git a/docs/integrations-in-rancher/logging/logging-helm-chart-options.md b/docs/integrations-in-rancher/logging/logging-helm-chart-options.md
index d68865a3afc..2c1a79e4132 100644
--- a/docs/integrations-in-rancher/logging/logging-helm-chart-options.md
+++ b/docs/integrations-in-rancher/logging/logging-helm-chart-options.md
@@ -6,7 +6,7 @@ title: rancher-logging Helm Chart Options
-### Enable/Disable Windows Node Logging
+## Enable/Disable Windows Node Logging
You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`.
@@ -21,7 +21,7 @@ Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists whe
:::
-### Working with a Custom Docker Root Directory
+## Working with a Custom Docker Root Directory
If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`.
@@ -31,11 +31,11 @@ Note that this only affects Linux nodes.
If there are any Windows nodes in the cluster, the change will not be applicable to those nodes.
-### Adding NodeSelector Settings and Tolerations for Custom Taints
+## Adding NodeSelector Settings and Tolerations for Custom Taints
You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md)
-### Enabling the Logging Application to Work with SELinux
+## Enabling the Logging Application to Work with SELinux
:::note Requirements:
@@ -49,7 +49,7 @@ To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RP
Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`.
-### Additional Logging Sources
+## Additional Logging Sources
By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types.
@@ -72,7 +72,7 @@ When enabled, Rancher collects all additional node and control plane logs the pr
If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs.
-### Systemd Configuration
+## Systemd Configuration
In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions.
@@ -87,7 +87,7 @@ K3s and RKE2 Kubernetes distributions log to journald, which is the subsystem of
* If `/var/log/journal` exists, then use `/var/log/journal`.
* If `/var/log/journal` does not exist, then use `/run/log/journal`.
-:::note Notes:
+:::note
If any value not described above is returned, Rancher Logging will not be able to collect control plane logs. To address this issue, you will need to perform the following actions on every control plane node:
@@ -95,4 +95,4 @@ If any value not described above is returned, Rancher Logging will not be able t
* Reboot your machine.
* Set `systemdLogPath` to `/run/log/journal`.
-:::
\ No newline at end of file
+:::
diff --git a/docs/integrations-in-rancher/logging/taints-and-tolerations.md b/docs/integrations-in-rancher/logging/taints-and-tolerations.md
index 327cf554fda..0147598e84c 100644
--- a/docs/integrations-in-rancher/logging/taints-and-tolerations.md
+++ b/docs/integrations-in-rancher/logging/taints-and-tolerations.md
@@ -20,7 +20,7 @@ Both provide choice for the what node(s) the pod will run on.
- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints)
-### Default Implementation in Rancher's Logging Stack
+## Default Implementation in Rancher's Logging Stack
By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes.
The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes.
@@ -47,7 +47,7 @@ In the above example, we ensure that our pod only runs on Linux nodes, and we ad
You can do the same with Rancher's existing taints, or with your own custom ones.
-### Adding NodeSelector Settings and Tolerations for Custom Taints
+## Adding NodeSelector Settings and Tolerations for Custom Taints
If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values.
diff --git a/docs/integrations-in-rancher/longhorn/overview.md b/docs/integrations-in-rancher/longhorn/overview.md
index db7e4a62076..13a581175d2 100644
--- a/docs/integrations-in-rancher/longhorn/overview.md
+++ b/docs/integrations-in-rancher/longhorn/overview.md
@@ -25,7 +25,7 @@ With Longhorn, you can:

-### Installing Longhorn with Rancher
+## Installing Longhorn with Rancher
1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/latest/deploy/install/#installation-requirements)
1. Go to the cluster where you want to install Longhorn.
@@ -37,14 +37,14 @@ With Longhorn, you can:
**Result:** Longhorn is deployed in the Kubernetes cluster.
-### Accessing Longhorn from the Rancher UI
+## Accessing Longhorn from the Rancher UI
1. Go to the cluster where Longhorn is installed. In the left navigation menu, click **Longhorn**.
1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section.
**Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3.
-### Uninstalling Longhorn from the Rancher UI
+## Uninstalling Longhorn from the Rancher UI
1. Go to the cluster where Longhorn is installed and click **Apps**.
1. Click **Installed Apps**.
@@ -53,15 +53,15 @@ With Longhorn, you can:
**Result:** Longhorn is uninstalled.
-### GitHub Repository
+## GitHub Repository
The Longhorn project is available [here.](https://github.com/longhorn/longhorn)
-### Documentation
+## Documentation
The Longhorn documentation is [here.](https://longhorn.io/docs/)
-### Architecture
+## Architecture
Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes.
diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
index da6460a0da7..79b243cb78d 100644
--- a/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
+++ b/docs/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
@@ -15,7 +15,7 @@ For information on V1 monitoring and alerting, available in Rancher v2.2 up to v
Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster.
-### Features
+## Features
Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus.
@@ -97,7 +97,6 @@ To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts
For more details on how to upgrade wins on existing Windows hosts, see [Windows cluster support for Monitoring V2.](windows-support.md).
-
## Known Issues
There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more than the allotted default memory. If you enable monitoring on a K3s cluster, set `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi.
diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md b/docs/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
index 583611b99c0..101e1611b37 100644
--- a/docs/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
+++ b/docs/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
@@ -112,7 +112,7 @@ Monitoring also creates additional `ClusterRoles` that aren't assigned to users
| Role | Purpose |
| ------------------------------| ---------------------------|
-| monitoring-ui-view | _Available as of Monitoring v2 14.5.100+_ This ClusterRole allows users with write access to the project to view metrics graphs for the specified cluster in the Rancher UI. This is done by granting Read-only access to external Monitoring UIs. Users with this role have permission to list the Prometheus, Alertmanager, and Grafana endpoints and make GET requests to Prometheus, Alertmanager, and Grafana UIs through the Rancher proxy. This role doesn't grant access to monitoring endpoints. As a result, users with this role won't be able to view cluster monitoring graphs and dashboards in the Rancher UI; however, they are able to access the monitoring Grafana, Prometheus, and Alertmanager UIs if provided those links. |
+| monitoring-ui-view | This ClusterRole allows users with write access to the project to view metrics graphs for the specified cluster in the Rancher UI. This is done by granting Read-only access to external Monitoring UIs. Users with this role have permission to list the Prometheus, Alertmanager, and Grafana endpoints and make GET requests to Prometheus, Alertmanager, and Grafana UIs through the Rancher proxy. This role doesn't grant access to monitoring endpoints. As a result, users with this role won't be able to view cluster monitoring graphs and dashboards in the Rancher UI; however, they are able to access the monitoring Grafana, Prometheus, and Alertmanager UIs if provided those links. |
:::note
diff --git a/docs/integrations-in-rancher/monitoring-and-alerting/windows-support.md b/docs/integrations-in-rancher/monitoring-and-alerting/windows-support.md
index 8869e2cefe5..7f93c644ab2 100644
--- a/docs/integrations-in-rancher/monitoring-and-alerting/windows-support.md
+++ b/docs/integrations-in-rancher/monitoring-and-alerting/windows-support.md
@@ -6,9 +6,7 @@ title: Windows Cluster Support for Monitoring V2
-_Available as of v2.5.8_
-
-Starting at Monitoring V2 14.5.100 (used by default in Rancher 2.5.8), Monitoring V2 can now be deployed on a Windows cluster and will scrape metrics from Windows nodes using [prometheus-community/windows_exporter](https://github.com/prometheus-community/windows_exporter) (previously named `wmi_exporter`).
+Monitoring V2 can be deployed on a Windows cluster to scrape metrics from Windows nodes using [prometheus-community/windows_exporter](https://github.com/prometheus-community/windows_exporter) (previously named `wmi_exporter`).
## Cluster Requirements
diff --git a/docs/integrations-in-rancher/neuvector/overview.md b/docs/integrations-in-rancher/neuvector/overview.md
index cec0d643afd..cf7ec91cdd9 100644
--- a/docs/integrations-in-rancher/neuvector/overview.md
+++ b/docs/integrations-in-rancher/neuvector/overview.md
@@ -6,13 +6,11 @@ title: Overview
-### NeuVector Integration in Rancher
-
[NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is integrated with Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../../reference-guides/rancher-security/rancher-security.md).
NeuVector can be enabled through a Helm chart that may be installed either through **Apps** or through the **Cluster Tools** button in the Rancher UI. Once the Helm chart is installed, users can easily [deploy and manage NeuVector clusters within Rancher](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace).
-### Installing NeuVector with Rancher
+## Installing NeuVector with Rancher
The NeuVector Helm chart is used to manage access to the NeuVector UI in Rancher where users can navigate directly to deploy and manage their NeuVector clusters.
@@ -44,12 +42,12 @@ Some examples are as follows:
1. Click on **Cluster Tools** at the bottom of the left navigation bar.
1. Repeat step 4 above to select your container runtime accordingly, then click **Install** again.
-### Accessing NeuVector from the Rancher UI
+## Accessing NeuVector from the Rancher UI
1. Navigate to the cluster explorer of the cluster where NeuVector is installed. In the left navigation bar, click **NeuVector**.
1. Click the external link to go to the NeuVector UI. Once the link is selected, users must accept the `END USER LICENSE AGREEMENT` to access the NeuVector UI.
-### Uninstalling NeuVector from the Rancher UI
+## Uninstalling NeuVector from the Rancher UI
**To uninstall from Apps:**
@@ -62,15 +60,15 @@ Some examples are as follows:
1. Click **☰ > Cluster Management**.
1. Click on **Cluster Tools** at the bottom-left of the screen, then click on the trash can icon under the NeuVector chart. Select `Delete the CRD associated with this app` if desired, then click **Delete**.
-### GitHub Repository
+## GitHub Repository
The NeuVector project is available [here](https://github.com/neuvector/neuvector).
-### Documentation
+## Documentation
The NeuVector documentation is [here](https://open-docs.neuvector.com/).
-### Architecture
+## Architecture
The NeuVector security solution contains four types of security containers: Controllers, Enforcers, Managers, and Scanners. A special container called an All-in-One is also provided to combine the Controller, Enforcer, and Manager functions all in one container, primarily for Docker-native deployments. There is also an Updater which, when run, will update the CVE database.
@@ -91,7 +89,7 @@ The NeuVector security solution contains four types of security containers: Cont
To learn more about NeuVector's architecture, please refer [here](https://open-docs.neuvector.com/basics/overview#architecture).
-### CPU and Memory Allocations
+## CPU and Memory Allocations
Below are the minimum recommended computing resources for the NeuVector chart installation in a default deployment. Note that the resource limit is not set.
@@ -105,7 +103,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in
\* Minimum 1GB of memory total required for Controller, Manager, and Scanner containers combined.
-### Hardened Cluster Support - Calico and Canal
+## Hardened Cluster Support - Calico and Canal
@@ -162,7 +160,7 @@ Below are the minimum recommended computing resources for the NeuVector chart in
-### SELinux-enabled Cluster Support - Calico and Canal
+## SELinux-enabled Cluster Support - Calico and Canal
To enable SELinux on RKE2 clusters, follow the steps below:
@@ -179,12 +177,11 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{
kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}'
```
-### Cluster Support in an Air-Gapped Environment
+## Cluster Support in an Air-Gapped Environment
- All NeuVector components are deployable on a cluster in an air-gapped environment without any additional configuration needed.
-
-### Support Limitations
+## Support Limitations
* Only admins and cluster owners are currently supported.
@@ -192,12 +189,10 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
* NeuVector is not supported on a Windows cluster.
-
-### Other Limitations
+## Other Limitations
* Currently, NeuVector feature chart installation fails when a NeuVector partner chart already exists. To work around this issue, uninstall the NeuVector partner chart and reinstall the NeuVector feature chart.
* Sometimes when the controllers are not ready, the NeuVector UI is not accessible from the Rancher UI. During this time, controllers will try to restart, and it takes a few minutes for the controllers to be active.
* Container runtime is not auto-detected for different cluster types when installing the NeuVector chart. To work around this, you can specify the runtime manually.
-
diff --git a/docs/integrations-in-rancher/suse-observability/suse-observability.md b/docs/integrations-in-rancher/suse-observability/suse-observability.md
new file mode 100644
index 00000000000..cd1b2ed430c
--- /dev/null
+++ b/docs/integrations-in-rancher/suse-observability/suse-observability.md
@@ -0,0 +1,21 @@
+---
+title: SUSE Observability
+---
+
+
+
+
+
+SUSE Observability is a complete observability solution that provides deep insights into the health of your clusters and nodes, and the workloads running on them. Designed to give you clear visibility into your entire Kubernetes environment, SUSE Observability’s full-stack approach allows you to seamlessly explore everything from services to infrastructure within a single platform, eliminating the need for multiple observability tools.
+
+SUSE Observability securely collects and correlates data, offering actionable insights into both existing and potential issues in your cluster. This helps you address current problems swiftly and take preventative measures against future challenges.
+
+The intuitive dashboards highlight problem areas and offer remediation steps, guiding you from issue identification to root cause analysis, and ultimately to resolution, in the quickest possible time.
+
+For more information and to set up SUSE Observability in your SUSE Rancher-managed Kubernetes cluster, please refer to the [documentation](https://docs.stackstate.com/).
+
+:::note
+
+The documentation portal for SUSE Observability is currently under development. In the coming months, the portal will be rolled out featuring comprehensive guides, tutorials, and references to support you on your SUSE Observability journey. Stay tuned!
+
+:::
diff --git a/docs/reference-guides/about-the-api/about-the-api.md b/docs/reference-guides/about-the-api/about-the-api.md
deleted file mode 100644
index 21f3c03bdc1..00000000000
--- a/docs/reference-guides/about-the-api/about-the-api.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: API
----
-
-
-
-
-
-## How to use the API
-
-The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it:
-
-
-
-
-1. Click on your user avatar in the upper right corner.
-1. Click **Account & API Keys**.
-1. Under the **API Keys** section, find the **API Endpoint** field and click the link. The link will look something like `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment.
-
-
-
-
-Go to the URL endpoint at `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment.
-
-
-
-
-## Authentication
-
-API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys](../user-settings/api-keys.md). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) apply to these keys and restrict what clusters and projects the account can see and what actions they can take.
-
-By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page](api-tokens.md).
-
-## Making requests
-
-The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md).
-
-- Every type has a Schema which describes:
- - The URL to get to the collection of this type of resources
- - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc.
- - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas).
- - Every field that filtering is allowed on
- - What HTTP verb methods are available for the collection itself, or for individual resources in the collection.
-
-
-- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information.
-
-- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases.
-
-- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself.
-
-- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information.
-
-- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored.
-
-- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource.
-
-- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`).
-
-## Filtering
-
-Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details.
-
-## Sorting
-
-Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified.
-
-## Pagination
-
-API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not.
-
-## Capturing Rancher API Calls
-
-You can use browser developer tools to capture how the Rancher API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster:
-
-1. In the Rancher UI, go to **Cluster Management** and click **Create.**
-1. Click one of the cluster types. This example uses Digital Ocean.
-1. Fill out the form with a cluster name and node template, but don't click **Create**.
-1. You will need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click on the Rancher UI and click **Inspect.**
-1. In the developer tools, click the **Network** tab.
-1. On the **Network** tab, make sure **Fetch/XHR** is selected.
-1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`.
-1. Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.**
-1. Paste the result into any text editor. You will be able to see the POST request, including the URL it was sent to, all of the headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: The request should be stored in a safe place because it contains credentials.
-
-### Enable View in API
-
-You can also view captured Rancher API calls for your respective clusters and resources. This feature is not enabled by default. To enable it:
-
-1. Click on your **User Tile** in the top right corner of the UI and select **Preferences** from the dropdown menu.
-2. Under the **Advanced Features** section, click **Enable "View in API"**
-
-Once checked, the **View in API** link will now appear under the **⋮** sub-menu on resource pages in the UI.
diff --git a/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md b/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
index 8c71b562ae0..9dbf6d903c6 100644
--- a/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
+++ b/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
@@ -18,7 +18,7 @@ When you set up your high-availability Rancher installation, consider the follow
Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on.
### Make sure nodes are configured correctly for Kubernetes
-It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://etcd.io/docs/v3.4/op-guide/performance/).
+It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://etcd.io/docs/v3.5/op-guide/performance/).
### When using RKE: Back up the Statefile
RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file.
diff --git a/docs/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md b/docs/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
index 50687cdd32c..22642bbdf63 100644
--- a/docs/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
+++ b/docs/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
@@ -49,7 +49,6 @@ This is typical in Rancher, as many operations create new `RoleBinding` objects
You can reduce the number of `RoleBindings` in the upstream cluster in the following ways:
* Limit the use of the [Restricted Admin](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#restricted-admin) role. Apply other roles wherever possible.
-* If you use [external authentication](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md), use groups to assign roles.
* Only add users to clusters and projects when necessary.
* Remove clusters and projects when they are no longer needed.
* Only use custom roles if necessary.
@@ -59,6 +58,12 @@ You can reduce the number of `RoleBindings` in the upstream cluster in the follo
* Kubernetes permissions are always "additive" (allow-list) rather than "subtractive" (deny-list). Try to minimize configurations that gives access to all but one aspect of a cluster, project, or namespace, as that will result in the creation of a high number of `RoleBinding` objects.
* Experiment to see if creating new projects or clusters manifests in fewer `RoleBindings` for your specific use case.
+### Using External Authentication
+
+If you have fifty or more users, you should configure an [external authentication provider](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md). This is necessary for better performance.
+
+After you configure external authentication, make sure to assign permissions to groups instead of to individual users. This helps reduce the `RoleBinding` object count.
+
### RoleBinding Count Estimation
Predicting how many `RoleBinding` objects a given configuration will create is complicated. However, the following considerations can offer a rough estimate:
@@ -83,7 +88,7 @@ An [Authorized Cluster Endpoint](../../../reference-guides/rancher-manager-archi
### Reducing Event Handler Executions
-The bulk of Rancher's logic occurs on event handlers. These event handlers run on an object whenever the object is updated, and when Rancher is started. Additionally, they run every 15 hours when Rancher syncs caches. In scaled setups these scheduled runs come with huge performance costs because every handler is being run on every applicable object. However, the scheduled handler execution can be disabled with the `CATTLE_SYNC_ONLY_CHANGED_OBJECTS` environment variable. If resource allocation spikes are seen every 15 hours, this setting can help.
+The bulk of Rancher's logic occurs on event handlers. These event handlers run on an object whenever the object is updated, and when Rancher is started. Additionally, they run every 10 hours when Rancher syncs caches. In scaled setups these scheduled runs come with huge performance costs because every handler is being run on every applicable object. However, the scheduled handler execution can be disabled with the `CATTLE_SYNC_ONLY_CHANGED_OBJECTS` environment variable. If resource allocation spikes are seen every 10 hours, this setting can help.
The value for `CATTLE_SYNC_ONLY_CHANGED_OBJECTS` can be a comma separated list of the following options. The values refer to types of handlers and controllers (the structures that contain and run handlers). Adding the controller types to the variable disables that set of controllers from running their handlers as part of cache resyncing.
@@ -91,7 +96,7 @@ The value for `CATTLE_SYNC_ONLY_CHANGED_OBJECTS` can be a comma separated list o
* `user` refers to user controllers which run for every cluster. Some of these run on the same node as management controllers, while others run in the downstream cluster. This option targets the former.
* `scaled` refers to scaled controllers which run on every Rancher node. You should avoid setting this value, as the scaled handlers are responsible for critical functions and changes may disrupt cluster stability.
-In short, if you notice CPU usage peaks every 15 hours, add the `CATTLE_SYNC_ONLY_CHANGED_OBJECTS` environment variable to your Rancher deployment (in the `spec.containers.env` list) with the value `mgmt,user`
+In short, if you notice CPU usage peaks every 10 hours, add the `CATTLE_SYNC_ONLY_CHANGED_OBJECTS` environment variable to your Rancher deployment (in the `spec.containers.env` list) with the value `mgmt,user`
## Optimizations Outside of Rancher
@@ -105,6 +110,14 @@ Although managed Kubernetes services make it easier to deploy and run Kubernetes
Use RKE2 for large scale use cases.
+### Keep all Upstream Cluster Nodes co-located
+
+To provide high availability, Kubernetes is designed to run nodes and control components in different zones. However, if nodes and control plane components are located in different zones, network traffic may be slower.
+
+Traffic between Rancher components and the Kubernetes API is especially sensitive to network latency, as is etcd traffic between nodes.
+
+To improve performance, run all upstream cluster nodes in the same location. In particular, make sure that latency between etcd nodes and Rancher is as low as possible.
+
### Keeping Kubernetes Versions Up to Date
You should keep the local Kubernetes cluster up to date. This will ensure that your cluster has all available performance enhancements and bug fixes.
@@ -113,8 +126,18 @@ You should keep the local Kubernetes cluster up to date. This will ensure that y
Etcd is the backend database for Kubernetes and for Rancher. It plays a very important role in Rancher performance.
-The two main bottlenecks to [etcd performance](https://etcd.io/docs/v3.4/op-guide/performance/) are disk and network speed. Etcd should run on dedicated nodes with a fast network setup and with SSDs that have high input/output operations per second (IOPS). For more information regarding etcd performance, see [Slow etcd performance (performance testing and optimization)](https://www.suse.com/support/kb/doc/?id=000020100) and [Tuning etcd for Large Installations](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md). Information on disks can also be found in the [Installation Requirements](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#disks).
+The two main bottlenecks to [etcd performance](https://etcd.io/docs/v3.5/op-guide/performance/) are disk and network speed. Etcd should run on dedicated nodes with a fast network setup and with SSDs that have high input/output operations per second (IOPS). For more information regarding etcd performance, see [Slow etcd performance (performance testing and optimization)](https://www.suse.com/support/kb/doc/?id=000020100) and [Tuning etcd for Large Installations](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md). Information on disks can also be found in the [Installation Requirements](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#disks).
It's best to run etcd on exactly three nodes, as adding more nodes will reduce operation speed. This may be counter-intuitive to common scaling approaches, but it's due to etcd's [replication mechanisms](https://etcd.io/docs/v3.5/faq/#what-is-maximum-cluster-size).
Etcd performance will also be negatively affected by network latency between nodes as that will slow down network communication. Etcd nodes should be located together with Rancher nodes.
+
+### Browser Requirements
+
+At high scale, Rancher transfers more data from the upstream cluster to UI components running in the browser, and those components also need to perform more processing.
+
+For best performance, ensure that the host running the browser meets these hardware requirements:
+ - 2020 i5 10th generation Intel (4 cores) or equivalent
+ - 8 GB RAM
+ - Total network bandwidth to the upstream cluster: 72 Mb/s (equivalent to a single 802.11n Wi-Fi 4 link stream, ~8 MB/s http download throughput)
+ - Round-trip time (ping time) from browser to upstream cluster: 150 ms or less
diff --git a/docs/reference-guides/cli-with-rancher/kubectl-utility.md b/docs/reference-guides/cli-with-rancher/kubectl-utility.md
index fefa1a66b1b..5b863d2e4be 100644
--- a/docs/reference-guides/cli-with-rancher/kubectl-utility.md
+++ b/docs/reference-guides/cli-with-rancher/kubectl-utility.md
@@ -22,7 +22,7 @@ Run `kubectl cluster-info` or `kubectl get pods` successfully.
_Requirements_
-If admins have [kubeconfig token generation turned off](../about-the-api/api-tokens.md#disable-tokens-in-generated-kubeconfigs), the kubeconfig file requires the [Rancher CLI](./rancher-cli.md) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like:
+If admins have [kubeconfig token generation turned off](../../api/api-tokens.md#disable-tokens-in-generated-kubeconfigs), the kubeconfig file requires the [Rancher CLI](./rancher-cli.md) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like:
`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`.
This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported:
@@ -32,5 +32,6 @@ This feature enables kubectl to authenticate with the Rancher server and get a n
3. FreeIPA
4. OpenLDAP
5. SAML providers: Ping, Okta, ADFS, Keycloak, Shibboleth
+6. Azure AD
-When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../about-the-api/api-tokens.md#disable-tokens-in-generated-kubeconfigs), or [gets deleted from the Rancher server](../about-the-api/api-tokens.md#deleting-tokens). Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again.
\ No newline at end of file
+When you first run kubectl, for example, `kubectl get pods`, you are prompted to pick an auth provider and log in with the Rancher server. The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../../api/api-tokens.md#disable-tokens-in-generated-kubeconfigs), or [gets deleted from the Rancher server](../../api/api-tokens.md#deleting-tokens). Upon expiration, you must log in with the Rancher server again to run the `kubectl get pods` command.
diff --git a/docs/reference-guides/cli-with-rancher/rancher-cli.md b/docs/reference-guides/cli-with-rancher/rancher-cli.md
index e93d9805480..adff436c8e5 100644
--- a/docs/reference-guides/cli-with-rancher/rancher-cli.md
+++ b/docs/reference-guides/cli-with-rancher/rancher-cli.md
@@ -9,7 +9,7 @@ description: Interact with Rancher using command line interface (CLI) tools from
The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI.
-### Download Rancher CLI
+## Download Rancher CLI
The binary can be downloaded directly from the UI.
@@ -17,14 +17,14 @@ The binary can be downloaded directly from the UI.
1. At the bottom of the navigation sidebar menu, click **About**.
1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary.
-### Requirements
+## Requirements
After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires:
- Your Rancher Server URL, which is used to connect to Rancher Server.
- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../user-settings/api-keys.md).
-### CLI Authentication
+## CLI Authentication
Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information):
@@ -34,7 +34,7 @@ $ ./rancher login https:// --token
If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection.
-### Project Selection
+## Project Selection
Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project.
@@ -58,7 +58,7 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json
Ensure you can run `rancher kubectl get pods` successfully.
-### Commands
+## Commands
The following commands are available for use in Rancher CLI.
@@ -86,12 +86,12 @@ The following commands are available for use in Rancher CLI.
| `token` | Authenticates and generates new kubeconfig token. |
| `help, [h]` | Shows a list of commands or help for one command. |
-### Rancher CLI Help
+## Rancher CLI Help
Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands.
All commands accept the `--help` flag, which documents each command's usage.
-### Limitations
+## Limitations
The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md).
diff --git a/docs/reference-guides/cluster-configuration/cluster-configuration.md b/docs/reference-guides/cluster-configuration/cluster-configuration.md
index 139023b17d7..8abd3377435 100644
--- a/docs/reference-guides/cluster-configuration/cluster-configuration.md
+++ b/docs/reference-guides/cluster-configuration/cluster-configuration.md
@@ -10,7 +10,7 @@ After you provision a Kubernetes cluster using Rancher, you can still edit optio
For information on editing cluster membership, go to [this page.](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md)
-### Cluster Configuration References
+## Cluster Configuration References
The cluster configuration options depend on the type of Kubernetes cluster:
@@ -21,7 +21,7 @@ The cluster configuration options depend on the type of Kubernetes cluster:
- [GKE Cluster Configuration](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)
- [AKS Cluster Configuration](rancher-server-configuration/aks-cluster-configuration.md)
-### Cluster Management Capabilities by Cluster Type
+## Cluster Management Capabilities by Cluster Type
The options and settings available for an existing cluster change based on the method that you used to provision it.
diff --git a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
index 87b5fccdcfb..11b7a300a97 100644
--- a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
+++ b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
@@ -8,11 +8,11 @@ title: DigitalOcean Node Template Configuration
Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one.
-### Droplet Options
+## Droplet Options
The **Droplet Options** provision your cluster's geographical region and specifications.
-### Docker Daemon
+## Docker Daemon
If you use Docker, the [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include:
diff --git a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md
index cfc2ed183b7..805ed2170d8 100644
--- a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md
+++ b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md
@@ -33,6 +33,7 @@ The fields in the **Scheduling** section should auto-populate with the data cent
| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. |
| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. The folder name should be prefaced with `vm/` in your vSphere config file. |
| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. |
+| Graceful Shutdown Timeout | | The amount of time, in seconds, that Rancher waits before deleting virtual machines on a cluster. If set to `0`, graceful shutdown is disabled. Only accepts integer values. |
## Instance Options
diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
index dc3974e551f..9fac68fb5a8 100644
--- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
+++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
@@ -6,13 +6,6 @@ title: AKS Cluster Configuration Reference
-## Changes in Rancher v2.6
-
-- Support for adding more than one node pool
-- Support for private clusters
-- Enabled autoscaling node pools
-- The AKS permissions are now configured in cloud credentials
-
## Role-based Access Control
When provisioning an AKS cluster in the Rancher UI, RBAC cannot be disabled. If role-based access control is disabled for the cluster in AKS, the cluster cannot be registered or imported into Rancher.
diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md
index 9d7a2abb0d6..612d323c0f9 100644
--- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md
+++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md
@@ -6,12 +6,6 @@ title: GKE Cluster Configuration Reference
-## Changes in Rancher v2.6
-
-- Support for additional configuration options:
- - Project network isolation
- - Network tags
-
## Cluster Location
| Value | Description |
diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
index 553ab6396cf..4322fb5e2d2 100644
--- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
+++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
@@ -8,11 +8,11 @@ title: Private Clusters
In GKE, [private clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept) are clusters whose nodes are isolated from inbound and outbound traffic by assigning them internal IP addresses only. Private clusters in GKE have the option of exposing the control plane endpoint as a publicly accessible address or as a private address. This is different from other Kubernetes providers, which may refer to clusters with private control plane endpoints as "private clusters" but still allow traffic to and from nodes. You may want to create a cluster with private nodes, with or without a public control plane endpoint, depending on your organization's networking and security requirements. A GKE cluster provisioned from Rancher can use isolated nodes by selecting "Private Cluster" in the Cluster Options (under "Show advanced options"). The control plane endpoint can optionally be made private by selecting "Enable Private Endpoint".
-### Private Nodes
+## Private Nodes
Because the nodes in a private cluster only have internal IP addresses, they will not be able to install the cluster agent and Rancher will not be able to fully manage the cluster. This can be overcome in a few ways.
-#### Cloud NAT
+### Cloud NAT
:::caution
@@ -20,9 +20,9 @@ Cloud NAT will [incur charges](https://cloud.google.com/nat/pricing).
:::
-If restricting outgoing internet access is not a concern for your organization, use Google's [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) service to allow nodes in the private network to access the internet, enabling them to download the required images from Dockerhub and contact the Rancher management server. This is the simplest solution.
+If restricting outgoing internet access is not a concern for your organization, use Google's [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) service to allow nodes in the private network to access the internet, enabling them to download the required images from Docker Hub and contact the Rancher management server. This is the simplest solution.
-#### Private registry
+### Private Registry
:::caution
@@ -32,11 +32,11 @@ This scenario is not officially supported, but is described for cases in which u
If restricting both incoming and outgoing traffic to nodes is a requirement, follow the air-gapped installation instructions to set up a private container image [registry](../../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md) on the VPC where the cluster is going to be, allowing the cluster nodes to access and download the images they need to run the cluster agent. If the control plane endpoint is also private, Rancher will need [direct access](#direct-access) to it.
-### Private Control Plane Endpoint
+## Private Control Plane Endpoint
If the cluster has a public endpoint exposed, Rancher will be able to reach the cluster, and no additional steps need to be taken. However, if the cluster has no public endpoint, then considerations must be made to ensure Rancher can access the cluster.
-#### Cloud NAT
+### Cloud NAT
:::caution
@@ -47,7 +47,7 @@ Cloud NAT will [incur charges](https://cloud.google.com/nat/pricing).
As above, if restricting outgoing internet access to the nodes is not a concern, then Google's [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) service can be used to allow the nodes to access the internet. While the cluster is provisioning, Rancher will provide a registration command to run on the cluster. Download the [kubeconfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl) for the new cluster and run the provided kubectl command on the cluster. Gaining access
to the cluster in order to run this command can be done by creating a temporary node or using an existing node in the VPC, or by logging on to or creating an SSH tunnel through one of the cluster nodes.
-#### Direct access
+### Direct Access
If the Rancher server is run on the same VPC as the cluster's control plane, it will have direct access to the control plane's private endpoint. The cluster nodes will need to have access to a [private registry](#private-registry) to download images as described above.
diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index 44b7f28bd26..aab9b19794a 100644
--- a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -149,13 +149,13 @@ Project network isolation is available if you are using any RKE2 network plugin
##### CoreDNS
-By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, an alternate DNS provider must be installed yourself. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#coredns) for additional CoreDNS configurations.
+By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, you must install an alternate DNS provider yourself. Refer to the [RKE2 documentation](https://docs.rke2.io/networking/networking_services#coredns) for additional CoreDNS configurations.
##### NGINX Ingress
-If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. Refer to the [RKE2 documentation](https://docs.rke2.io/networking#nginx-ingress-controller) for additional configuration options.
+If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. Refer to the [RKE2 documentation](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller) for additional configuration options.
-Refer to the [RKE2 documentation](https://docs.rke2.io/networking#nginx-ingress-controller) for additional configuration options.
+Refer to the [RKE2 documentation](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller) for additional configuration options.
##### Metrics Server
diff --git a/docs/reference-guides/monitoring-v2-configuration/examples.md b/docs/reference-guides/monitoring-v2-configuration/examples.md
index 1660d70a31f..04b17ca9f5e 100644
--- a/docs/reference-guides/monitoring-v2-configuration/examples.md
+++ b/docs/reference-guides/monitoring-v2-configuration/examples.md
@@ -6,15 +6,15 @@ title: Monitoring Configuration Examples
-### ServiceMonitor
+## ServiceMonitor
See the official prometheus-operator GitHub repo for an example [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) YAML.
-### PodMonitor
+## PodMonitor
See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/getting-started/#using-podmonitors) for an example PodMonitor and an example Prometheus resource that refers to a PodMonitor.
-### PrometheusRule
+## PrometheusRule
A PrometheusRule contains the alerting and recording rules that you would usually place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).
@@ -22,6 +22,6 @@ For a more fine-grained approach, the `ruleSelector` field on a Prometheus resou
See the [Prometheus Operator documentation](https://prometheus-operator.dev/docs/user-guides/alerting/) for an example PrometheusRule.
-### Alertmanager Config
+## Alertmanager Config
See the Rancher docs page on Receivers for an example [Alertmanager config](./receivers.md#example-alertmanager-configs).
diff --git a/docs/reference-guides/monitoring-v2-configuration/receivers.md b/docs/reference-guides/monitoring-v2-configuration/receivers.md
index 79d9749dc18..b1237e3646b 100644
--- a/docs/reference-guides/monitoring-v2-configuration/receivers.md
+++ b/docs/reference-guides/monitoring-v2-configuration/receivers.md
@@ -29,6 +29,9 @@ This section assumes familiarity with how monitoring components work together. F
1. Go to the cluster where you want to create receivers. Click **Monitoring -> Alerting -> AlertManagerConfigs**.
1. Click **Create**.
+1. Enter a **Name** for the new AlertmanagerConfig.
+1. Click **Create**.
+1. After creating the AlertManagerConfig, click it to add a receiver.
1. Click **Add Receiver**.
1. Enter a **Name** for the receiver.
1. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below.
@@ -55,11 +58,11 @@ The notification integrations are configured with the `receiver`, which is expla
By default, AlertManager provides native integration with some receivers, which are listed in [this section.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) All natively supported receivers are configurable through the Rancher UI.
-For notification mechanisms not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI.
+For notification mechanisms, such as Telegram, that are not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI.
Currently the Rancher Alerting Drivers app provides access to the following integrations:
-- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver
-- SMS, based on the [Sachet](https://github.com/messagebird/sachet) driver
+- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver.
+- SMS and Telegram, based on the [Sachet](https://github.com/messagebird/sachet) driver.
The following types of receivers can be configured in the Rancher UI:
@@ -71,6 +74,7 @@ The following types of receivers can be configured in the Rancher UI:
- Custom
- Teams
- SMS
+- Telegram
The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI.
@@ -132,8 +136,6 @@ Opsgenie Responders:
| Proxy URL | Proxy for the webhook notification. |
| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). |
-
-
## Custom
The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret.
@@ -211,6 +213,87 @@ url http://rancher-alerting-drivers-sachet.ns-1.svc:9876/alert
+## Telegram
+
+### Enabling the Telegram Receiver for Rancher Managed Clusters
+
+The Telegram receiver is not a native receiver. You must enable it before it can be used. You can enable the Telegram receiver for a Rancher-managed cluster by going to the **Apps** page and installing the `rancher-alerting-drivers` app with the **Telegram** option selected:
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to install `rancher-alerting-drivers` and click **Explore**.
+1. In the left navigation bar, click on **Apps**.
+1. Click the **Alerting Drivers** app.
+1. Click on **Install**.
+1. In the page that opens next, make sure that the **Enable SMS** checkbox is selected. Telegram notifications require you to enable SMS.
+1. Take note of the namespace used as it will be required in a later step.
+
+### Test the Configuration by Configuring a PrometheusRule
+
+To test your Telegram setup, create a **PrometheusRule** that continuously raises alerts.
+
+:::caution NOTE
+This rule is intended only to test if Telegram alerts work as expected. Do not leave it on after testing is completed.
+:::
+
+1. In the left navigation menu, click **Monitoring**.
+1. Click **Advanced**.
+1. Click **PrometheusRules > Create**.
+1. Select a namespace to place the rule in and name the rule appropriately.
+1. Set the group name to `test`. Use this value later when you create a **Route** in the **AlertManagerConfig**.
+1. Under **Alerting Rules** click **Add**.
+1. Set an appropriate **Alert Name**.
+1. To trigger the alert immediately and continuously, enter the following PromQL Expression: `vector(1)`.
+1. Under **Labels**, click **Add Label**. Enter the key `test` and value `alert`. This key-value pair will also be used later.
+
+#### Configure an AlertManagerConfig
+
+Configure an **AlertManagerConfig** to contain the **Receiver** and **Route** configuration for the **PrometheusRule** created above:
+1. Click **Monitoring > Alerting**, and open **AlertManagerConfigs**.
+1. Click **Create**.
+
+#### Create a Receiver in AlertManagerConfig
+
+1. Choose a namespace from the dropdown and set an appropriate name.
+1. Click **Create**.
+1. Open the newly created **AlertManagerConfig** and click **⋮ > Edit Config**.
+1. Click **Add Receiver**.
+1. Select **Webhook** from the list on the **Create Receiver in AlertmanagerConfig** page.
+1. Name the webhook, and click **Add Webhook**.
+1. In the **Select Webhook Type** dropdown, select **SMS**. This will automatically populate the **Target** field as `http://rancher-alerting-drivers-sachet.cattle-monitoring-system.svc:9876/alert`. If you installed the **Alerting Drivers** in a namespace other than `cattle-monitoring-system`, the target URL will reflect that.
+1. Click **Create**.
+
+#### Create a Route in AlertManagerConfig
+
+1. Click **⋮ > Edit Config**.
+1. Click **Route**.
+1. In the dropdown, select the **Receiver** you just created.
+1. In the **Labels to Group Alerts By** field, type `test`.
+1. Under **Waiting and Intervals**, set **Group Wait** to `1s` and **Group Interval** to `10s`. This triggers frequent alerts. Change the values as appropriate.
+1. Under **Matchers** click **Add Matcher**. Enter `test` in the **Name** field and `alert` in the **Value** field. From the **Match Type** dropdown, select `MatchEqual`.
+1. Click **Save**.
+
+### Configuring the Telegram Receiver
+
+You can configure the Telegram receiver by updating the `rancher-alerting-drivers-sachet` ConfigMap in the `cattle-monitoring-system` namespace. For example, the following is a minimal Telegram receiver configuration:
+
+```yaml
+providers:
+ telegram:
+ token:
+
+receivers:
+- name: 'cattle-monitoring-system/test-amc/prom2tel'
+ provider: 'telegram'
+ to:
+ - '123456789'
+```
+
+To obtain a Telegram token, set up a Telegram bot. Refer to the [official Telegram guide](https://core.telegram.org/bots/tutorial) for details.
+After you finish configuring the receiver, [add](#creating-receivers-in-the-rancher-ui) it.
+
+Name the receiver `<namespace>/<AlertmanagerConfig name>/<receiver name>`, for example `cattle-monitoring-system/test-amc/prom2tel`. Enter `123456789` as a placeholder for the Telegram user ID to send the notifications to. To find your Telegram ID, check [the Telegram userinfo bot](https://telegram.me/userinfobot).
+
+You should now receive Telegram notifications to the user ID. If you don't receive notifications, please check if there are any errors reported in the Pod for the Deployment `rancher-alerting-drivers-sachet` under the `cattle-monitoring-system` namespace.
## Configuring Multiple Receivers
diff --git a/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
index 30fccb634c8..1eb42e346e4 100644
--- a/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
+++ b/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
@@ -18,7 +18,7 @@ This section assumes familiarity with how monitoring components work together. F
:::
-### ServiceMonitors
+## ServiceMonitors
This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored.
@@ -28,7 +28,7 @@ Any Services in your cluster that match the labels located within the ServiceMon
For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)
-### PodMonitors
+## PodMonitors
This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored.
diff --git a/docs/reference-guides/prometheus-federator/prometheus-federator.md b/docs/reference-guides/prometheus-federator/prometheus-federator.md
index 5166ab8732f..dd5f22d93a6 100644
--- a/docs/reference-guides/prometheus-federator/prometheus-federator.md
+++ b/docs/reference-guides/prometheus-federator/prometheus-federator.md
@@ -26,18 +26,18 @@ Prometheus Federator is designed to be deployed alongside an existing Prometheus
2. On seeing each ProjectHelmChartCR, the operator will automatically deploy a Project Prometheus stack on the Project Owner's behalf in the **Project Release Namespace (`cattle-project--monitoring`)** based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**.
3. RBAC will automatically be assigned in the Project Release Namespace to allow users to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack deployed; this will be based on RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). For more information, see the section on [configuring RBAC](rbac.md).
-### What is a Project?
+## What is a Project?
In Prometheus Federator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`. By default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given Rancher Project.
-### Configuring the Helm release created by a ProjectHelmChart
+## Configuring the Helm release created by a ProjectHelmChart
The `spec.values` of this ProjectHelmChart's resources will correspond to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf; to see the underlying chart's `values.yaml` spec, either:
- View the chart's definition located at [`rancher/prometheus-federator` under `charts/rancher-project-monitoring`](https://github.com/rancher/prometheus-federator/blob/main/charts/rancher-project-monitoring) (where the chart version will be tied to the version of this operator).
- Look for the ConfigMap named `monitoring.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that was used to configure the chart (which was embedded directly into the `prometheus-federator` binary).
-### Namespaces
+## Namespaces
As a Project Operator based on [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator), Prometheus Federator has three different classifications of namespaces that the operator looks out for:
@@ -65,7 +65,7 @@ As a Project Operator based on [rancher/helm-project-operator](https://github.co
:::
-### Helm Resources (HelmChart, HelmRelease)
+## Helm Resources (HelmChart, HelmRelease)
On deploying a ProjectHelmChart, the Prometheus Federator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn:
@@ -103,6 +103,6 @@ For more information on advanced configurations, refer to [this page](https://gi
|`helmProjectOperator.hardenedNamespaces.configuration`| The configuration to be supplied to the default ServiceAccount or auto-generated NetworkPolicy on managing a namespace. |
-->
-### Prometheus Federator on the Local Cluster
+## Prometheus Federator on the Local Cluster
Prometheus Federator is a resource intensive application. Installing it to the local cluster is possible, but **not recommended**.
\ No newline at end of file
diff --git a/docs/reference-guides/rancher-cluster-tools.md b/docs/reference-guides/rancher-cluster-tools.md
index 5221e0f150e..4607ae59035 100644
--- a/docs/reference-guides/rancher-cluster-tools.md
+++ b/docs/reference-guides/rancher-cluster-tools.md
@@ -17,7 +17,7 @@ Logging is helpful because it allows you to:
- Look for trends in your environment
- Save your logs to a safe location outside of your cluster
- Stay informed of events like a container crashing, a pod eviction, or a node dying
-- More easily debugg and troubleshoot problems
+- More easily debug and troubleshoot problems
Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd.
diff --git a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
index 381c4baee7d..e3dd9cb475e 100644
--- a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
+++ b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
@@ -21,7 +21,7 @@ The following descriptions correspond to the numbers in the diagram above:
3. [Node Agents](#3-node-agents)
4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint)
-### 1. The Authentication Proxy
+## 1. The Authentication Proxy
In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see
the pods. Bob is authenticated through Rancher's authentication proxy.
@@ -32,7 +32,7 @@ Rancher communicates with Kubernetes clusters using a [service account](https://
By default, Rancher generates a [kubeconfig file](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster.
-### 2. Cluster Controllers and Cluster Agents
+## 2. Cluster Controllers and Cluster Agents
Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server.
@@ -52,13 +52,13 @@ The cluster agent, also called `cattle-cluster-agent`, is a component that runs
- Applies the roles and bindings defined in each cluster's global policies
- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health
-### 3. Node Agents
+## 3. Node Agents
If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher.
The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. It is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots.
-### 4. Authorized Cluster Endpoint
+## 4. Authorized Cluster Endpoint
An authorized cluster endpoint (ACE) allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy.
@@ -89,6 +89,12 @@ We recommend exporting the kubeconfig file so that if Rancher goes down, you can
## Impersonation
+:::caution Known Issue
+
+Service account impersonation (`--as`), which lower-privileged user accounts use to drop privileges, is not implemented. Support for it is tracked in this [feature request](https://github.com/rancher/rancher/issues/41988).
+
+:::
+
Users technically exist only on the upstream cluster. Rancher creates [RoleBindings and ClusterRoleBindings](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) that refer to Rancher users, even though there is [no actual User resource](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#users-in-kubernetes) on the downstream cluster.
When users interact with a downstream cluster through the authentication proxy, there needs to be some entity downstream to serve as the actor for those requests. Rancher creates service accounts to be that entity. Each service account is only granted one permission, which is to **impersonate** the user they belong to. If there was only one service account that could impersonate any user, then it would be possible for a malicious user to corrupt that account and escalate their privileges by impersonating another user. This issue was the basis for a [CVE](https://github.com/rancher/rancher/security/advisories/GHSA-pvxj-25m6-7vqr).
diff --git a/docs/reference-guides/rancher-project-tools.md b/docs/reference-guides/rancher-project-tools.md
index d2b99f71fa9..d2d42178926 100644
--- a/docs/reference-guides/rancher-project-tools.md
+++ b/docs/reference-guides/rancher-project-tools.md
@@ -25,7 +25,7 @@ Logging is helpful because it allows you to:
- Look for trends in your environment
- Save your logs to a safe location outside of your cluster
- Stay informed of events like a container crashing, a pod eviction, or a node dying
-- More easily debugg and troubleshoot problems
+- More easily debug and troubleshoot problems
Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd.
diff --git a/docs/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md b/docs/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
index 90f9b494857..31dd193be80 100644
--- a/docs/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
+++ b/docs/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
@@ -102,7 +102,7 @@ The `rancher-restricted` template is provided by Rancher to enforce the highly-r
-K3s v1.24 and older support [Pod Security Policy (PSP)](https://v1-24.docs.kubernetes.io/docs/concepts/security/pod-security-policy/) for controlling pod security.
+K3s v1.24 and older support [Pod Security Policy (PSP)](https://github.com/kubernetes/website/blob/release-1.24/content/en/docs/concepts/security/pod-security-policy.md) for controlling pod security.
You can enable PSPs by passing the following flags in the cluster configuration in Rancher:
diff --git a/docs/reference-guides/rancher-security/kubernetes-security-best-practices.md b/docs/reference-guides/rancher-security/kubernetes-security-best-practices.md
index ace8bd95fab..50f39dcc1dc 100644
--- a/docs/reference-guides/rancher-security/kubernetes-security-best-practices.md
+++ b/docs/reference-guides/rancher-security/kubernetes-security-best-practices.md
@@ -6,7 +6,7 @@ title: Kubernetes Security Best Practices
-### Restricting cloud metadata API access
+## Restricting Cloud Metadata API Access
Cloud providers such as AWS, Azure, DigitalOcean or GCP often expose metadata services locally to instances. By default, this endpoint is accessible by pods running on a cloud instance, including pods in hosted Kubernetes providers such as EKS, AKS, DigitalOcean Kubernetes or GKE, and can contain cloud credentials for that node, provisioning data such as kubelet credentials, or other sensitive data. To mitigate this risk when running on a cloud platform, follow the [Kubernetes security recommendations](https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster/#restricting-cloud-metadata-api-access): limit permissions given to instance credentials, use network policies to restrict pod access to the metadata API, and avoid using provisioning data to deliver secrets.
diff --git a/docs/reference-guides/rancher-security/rancher-security-best-practices.md b/docs/reference-guides/rancher-security/rancher-security-best-practices.md
index df921789f25..fa958639c1f 100644
--- a/docs/reference-guides/rancher-security/rancher-security-best-practices.md
+++ b/docs/reference-guides/rancher-security/rancher-security-best-practices.md
@@ -6,7 +6,7 @@ title: Rancher Security Best Practices
-### Restrict Public Access to /version and /rancherversion Path
+## Restrict Public Access to /version and /rancherversion Path
The upstream (local) Rancher instance provides information about the Rancher version it is running and the Go version that was used to build it. That information is accessible via the `/version` path, which is used for tasks such as automating version bumps, or confirming that a deployment was successful. The upstream instance also provides Rancher version information accessible via the `/rancherversion` path.
@@ -14,8 +14,17 @@ Adversaries can misuse this information to identify the running Rancher version
See [OWASP Web Application Security Testing - Enumerate Infrastructure and Application Admin Interfaces](https://owasp.org/www-project-web-security-testing-guide/stable/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/05-Enumerate_Infrastructure_and_Application_Admin_Interfaces.html) for more information on protecting your server.
-### Session Management
+## Session Management
Some environments may require additional security controls for session management. For example, you may want to limit users' concurrent active sessions or restrict which geolocations those sessions can be initiated from. Such features are not supported by Rancher out of the box.
If you require such features, combine Layer 7 firewalls with [external authentication providers](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md#external-vs-local-authentication).
+
+## Use External Load Balancers to Protect Vulnerable Ports
+
+You should protect the following ports behind an [external load balancer](../../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) that has SSL offload enabled:
+
+- **K3s:** Port 6443, used by the Kubernetes API.
+- **RKE and RKE2:** Port 6443, used by the Kubernetes API, and port 9345, used for node registration.
+
+These ports have TLS SAN certificates which list nodes' public IP addresses. An attacker could use that information to gain unauthorized access or monitor activity on the cluster. Protecting these ports helps prevent nodes' public IP addresses from being disclosed to potential attackers.
diff --git a/docs/reference-guides/rancher-security/rancher-security.md b/docs/reference-guides/rancher-security/rancher-security.md
index f6d56c11654..f16699b8ac6 100644
--- a/docs/reference-guides/rancher-security/rancher-security.md
+++ b/docs/reference-guides/rancher-security/rancher-security.md
@@ -27,11 +27,11 @@ Security is at the heart of all Rancher features. From integrating with all the
On this page, we provide security related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters.
-### NeuVector Integration with Rancher
+## NeuVector Integration with Rancher
NeuVector is an open-source, container-focused security application that is now integrated into Rancher. NeuVector provides production security, DevOps vulnerability protection, and a container firewall, et al. Please see the [Rancher docs](../../integrations-in-rancher/neuvector/neuvector.md) and the [NeuVector docs](https://open-docs.neuvector.com/) for more information.
-### Running a CIS Security Scan on a Kubernetes Cluster
+## Running a CIS Security Scan on a Kubernetes Cluster
Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the [CIS](https://www.cisecurity.org/cis-benchmarks/) (Center for Internet Security) Kubernetes Benchmark.
@@ -47,13 +47,13 @@ When Rancher runs a CIS security scan on a cluster, it generates a report showin
For details, refer to the section on [security scans](../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md).
-### SELinux RPM
+## SELinux RPM
[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8.
We provide two RPMs (Red Hat packages) that enable Rancher products to function properly on SELinux-enforcing hosts: `rancher-selinux` and `rke2-selinux`. For details, see [this page](selinux-rpm/selinux-rpm.md).
-### Rancher Hardening Guide
+## Rancher Hardening Guide
The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security.
@@ -63,7 +63,7 @@ The hardening guides provide prescriptive guidance for hardening a production in
Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher.
-### The CIS Benchmark and Self-Assessment
+## The CIS Benchmark and Self-Assessment
The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster.
@@ -71,7 +71,7 @@ Because Rancher and RKE install Kubernetes services as Docker containers, many o
Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark.
-### Third-party Penetration Test Reports
+## Third-party Penetration Test Reports
Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Previous penetration test reports are available below.
@@ -82,14 +82,14 @@ Results:
Please note that new reports are no longer shared or made publicly available.
-### Rancher Security Advisories and CVEs
+## Rancher Security Advisories and CVEs
Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](security-advisories-and-cves.md)
-### Kubernetes Security Best Practices
+## Kubernetes Security Best Practices
For recommendations on securing your Kubernetes cluster, refer to the [Kubernetes Security Best Practices](kubernetes-security-best-practices.md) guide.
-### Rancher Security Best Practices
+## Rancher Security Best Practices
For recommendations on securing your Rancher Manager deployments, refer to the [Rancher Security Best Practices](rancher-security-best-practices.md) guide.
diff --git a/docs/reference-guides/rancher-security/security-advisories-and-cves.md b/docs/reference-guides/rancher-security/security-advisories-and-cves.md
index c8441ae5f44..20526d9ff9d 100644
--- a/docs/reference-guides/rancher-security/security-advisories-and-cves.md
+++ b/docs/reference-guides/rancher-security/security-advisories-and-cves.md
@@ -10,7 +10,11 @@ Rancher is committed to informing the community of security issues in our produc
| ID | Description | Date | Resolution |
|----|-------------|------|------------|
-| [CVE-2024-22030](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-22030) | A vulnerability was discovered in Rancher's and Fleet's agents, currently deemed a medium to high severity CVE, that under very specific circumstances allows a malicious actor to take over existing Rancher nodes. The attacker would need to have control of an expired domain or execute a DNS spoofing/hijacking attack against the domain in order to exploit this vulnerability. The targeted domain is the one used as the Rancher URL (the server-url of the Rancher cluster). At the moment there is no fix available and it affects all supported versions of Rancher. Customers and users are advised to follow the recommendations and best practices described in our [blog post](https://www.suse.com/c/rancher-security-update/). | 16 Feb 2024 | Pending |
+| [CVE-2024-22030](https://github.com/rancher/rancher/security/advisories/GHSA-h4h5-9833-v2p4) | A high severity vulnerability was discovered in Rancher's agents that under very specific circumstances allows a malicious actor to take over existing Rancher nodes. The attacker needs to have control of an expired domain or execute a DNS spoofing/hijacking attack against the domain in order to exploit this vulnerability. The targeted domain is the one used as the Rancher URL (the `server-url` of the Rancher cluster). | 19 Sep 2024 | Rancher [v2.9.2](https://github.com/rancher/rancher/releases/tag/v2.9.2), [v2.8.8](https://github.com/rancher/rancher/releases/tag/v2.8.8) and [v2.7.15](https://github.com/rancher/rancher/releases/tag/v2.7.15) |
+| [CVE-2024-22032](https://github.com/rancher/rancher/security/advisories/GHSA-q6c7-56cq-g2wm) | An issue was discovered in Rancher versions up to and including 2.7.13 and 2.8.4, where custom secrets encryption configurations are stored in plaintext under the clusters `AppliedSpec`. This also causes clusters to continuously reconcile, as the `AppliedSpec` would never match the desired cluster `Spec`. The stored information contains the encryption configuration for secrets within etcd, and could potentially expose sensitive data if the etcd database was exposed directly. | 17 Jun 2024 | Rancher [v2.8.5](https://github.com/rancher/rancher/releases/tag/v2.8.5) and [v2.7.14](https://github.com/rancher/rancher/releases/tag/v2.7.14) |
+| [CVE-2023-32196](https://github.com/rancher/rancher/security/advisories/GHSA-64jq-m7rq-768h) | An issue was discovered in Rancher versions up to and including 2.7.13 and 2.8.4, where the webhook rule resolver ignores rules from a `ClusterRole` for an external `RoleTemplate` set with `.context=project` or `.context=""`. This allows a user to create an external `ClusterRole` with `.context=project` or `.context=""`, depending on the use of the new feature flag `external-rules` and backing `ClusterRole`. | 17 Jun 2024 | Rancher [v2.8.5](https://github.com/rancher/rancher/releases/tag/v2.8.5) and [v2.7.14](https://github.com/rancher/rancher/releases/tag/v2.7.14) |
+| [CVE-2023-22650](https://github.com/rancher/rancher/security/advisories/GHSA-9ghh-mmcq-8phc) | An issue was discovered in Rancher versions up to and including 2.7.13 and 2.8.4, where Rancher did not have a user retention process for when external authentication providers are used, that could be configured to run periodically and disable and/or delete inactive users. The new user retention process added in Rancher v2.8.5 and Rancher v2.7.14 is disabled by default. If enabled, a user becomes subject to the retention process if they don't log in for a configurable period of time. It's possible to set overrides for user accounts that are primarily intended for programmatic access (e.g. CI, scripts, etc.) so that they don't become subject to the retention process for a longer period of time or at all. | 17 Jun 2024 | Rancher [v2.8.5](https://github.com/rancher/rancher/releases/tag/v2.8.5) and [v2.7.14](https://github.com/rancher/rancher/releases/tag/v2.7.14) |
+| [CVE-2023-32191](https://github.com/rancher/rke/security/advisories/GHSA-6gr4-52w6-vmqx) | An issue was discovered in Rancher versions up to and including 2.7.13 and 2.8.4, in which supported RKE versions store credentials inside a ConfigMap that can be accessible by non-administrative users in Rancher. This vulnerability only affects an RKE-provisioned cluster. | 17 Jun 2024 | Rancher [v2.8.5](https://github.com/rancher/rancher/releases/tag/v2.8.5) and [v2.7.14](https://github.com/rancher/rancher/releases/tag/v2.7.14) |
| [CVE-2023-32193](https://github.com/rancher/norman/security/advisories/GHSA-r8f4-hv23-6qp6) | An issue was discovered in Rancher versions up to and including 2.6.13, 2.7.9 and 2.8.1, where multiple Cross-Site Scripting (XSS) vulnerabilities can be exploited via the Rancher UI (Norman). | 8 Feb 2024 | Rancher [v2.8.2](https://github.com/rancher/rancher/releases/tag/v2.8.2), [v2.7.10](https://github.com/rancher/rancher/releases/tag/v2.7.10) and [v2.6.14](https://github.com/rancher/rancher/releases/tag/v2.6.14) |
| [CVE-2023-32192](https://github.com/rancher/apiserver/security/advisories/GHSA-833m-37f7-jq55) | An issue was discovered in Rancher versions up to and including 2.6.13, 2.7.9 and 2.8.1, where multiple Cross-Site Scripting (XSS) vulnerabilities can be exploited via the Rancher UI (Apiserver). | 8 Feb 2024 | Rancher [v2.8.2](https://github.com/rancher/rancher/releases/tag/v2.8.2), [v2.7.10](https://github.com/rancher/rancher/releases/tag/v2.7.10) and [v2.6.14](https://github.com/rancher/rancher/releases/tag/v2.6.14) |
| [CVE-2023-22649](https://github.com/rancher/rancher/security/advisories/GHSA-xfj7-qf8w-2gcr) | An issue was discovered in Rancher versions up to and including 2.6.13, 2.7.9 and 2.8.1, in which sensitive data may be leaked into Rancher's audit logs. | 8 Feb 2024 | Rancher [v2.8.2](https://github.com/rancher/rancher/releases/tag/v2.8.2), [v2.7.10](https://github.com/rancher/rancher/releases/tag/v2.7.10) and [v2.6.14](https://github.com/rancher/rancher/releases/tag/v2.6.14) |
@@ -28,7 +32,7 @@ Rancher is committed to informing the community of security issues in our produc
| [CVE-2022-31247](https://github.com/rancher/rancher/security/advisories/GHSA-6x34-89p7-95wg) | An issue was discovered in Rancher versions up to and including 2.5.15 and 2.6.6 where a flaw with authorization logic allows privilege escalation in downstream clusters through cluster role template binding (CRTB) and project role template binding (PRTB). The vulnerability can be exploited by any user who has permissions to create/edit CRTB or PRTB (such as `cluster-owner`, `manage cluster members`, `project-owner`, and `manage project members`) to gain owner permission in another project in the same cluster or in another project on a different downstream cluster. | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
| [CVE-2021-36783](https://github.com/rancher/rancher/security/advisories/GHSA-8w87-58w6-hfv8) | It was discovered that in Rancher versions up to and including 2.5.12 and 2.6.3, there is a failure to properly sanitize credentials in cluster template answers. This failure can lead to plaintext storage and exposure of credentials, passwords, and API tokens. The exposed credentials are visible in Rancher to authenticated `Cluster Owners`, `Cluster Members`, `Project Owners`, and `Project Members` on the endpoints `/v1/management.cattle.io.clusters`, `/v3/clusters`, and `/k8s/clusters/local/apis/management.cattle.io/v3/clusters`. | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
| [CVE-2021-36782](https://github.com/rancher/rancher/security/advisories/GHSA-g7j7-h4q8-8w2f) | An issue was discovered in Rancher versions up to and including 2.5.15 and 2.6.6 where sensitive fields like passwords, API keys, and Rancher's service account token (used to provision clusters) were stored in plaintext directly on Kubernetes objects like `Clusters` (e.g., `cluster.management.cattle.io`). Anyone with read access to those objects in the Kubernetes API could retrieve the plaintext version of those sensitive data. The issue was partially found and reported by Florian Struck (from [Continum AG](https://www.continum.net/)) and [Marco Stuurman](https://github.com/fe-ax) (from [Shock Media B.V.](https://www.shockmedia.nl/)). | 18 August 2022 | [Rancher v2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) and [Rancher v2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
-| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](../../faq/container-network-interface-providers.md#weave) Container Network Interface (CNI) when configured through [RKE templates](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/about-rke1-templates.md). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) |
+| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](../../faq/container-network-interface-providers.md#weave) Container Network Interface (CNI) when configured through [RKE templates](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/about-rke1-templates.md). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://github.com/weaveworks/weave/blob/master/site/tasks/manage/security-untrusted-networks.md) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) |
| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
| [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | This vulnerability only affects customers using the `restricted-admin` role in Rancher. A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 where the `global-data` role in `cattle-global-data` namespace grants write access to the Catalogs. Since each user with any level of catalog access was bound to the `global-data` role, this grants write access to templates (`CatalogTemplates`) and template versions (`CatalogTemplateVersions`) for any user with any level of catalog access. New users created in Rancher are by default assigned to the `user` role (standard user), which is not designed to grant write catalog access. This vulnerability effectively elevates the privilege of any user to write access for the catalog template and catalog template version resources. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Continuous Delivery with Fleet](../../integrations-in-rancher/fleet/fleet.md) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
diff --git a/docs/reference-guides/rancher-webhook.md b/docs/reference-guides/rancher-webhook.md
index 3b8005cba74..39223b92d4e 100644
--- a/docs/reference-guides/rancher-webhook.md
+++ b/docs/reference-guides/rancher-webhook.md
@@ -8,7 +8,8 @@ title: Rancher Webhook
Rancher-Webhook is an essential component of Rancher that works in conjunction with Kubernetes to enhance security and enable critical features for Rancher-managed clusters.
-It integrates with Kubernetes' extensible admission controllers, as described in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which allows Rancher-Webhook to inspect specific requests sent to the Kubernetes API server, and add custom, Rancher-specific validation and mutations to the requests that are specific to Rancher. Rancher-Webhook manages the resources to be validated using the `rancher.cattle.io` `ValidatingWebhookConfiguration` and the `rancher.cattle.io` `MutatingWebhookConfiguration`, and will override any manual edits.
+It integrates with Kubernetes' extensible admission controllers, as described in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which allows Rancher-Webhook to inspect specific requests sent to the Kubernetes API server, and add custom validations and mutations to the requests that are specific to Rancher. Rancher-Webhook manages the resources to be validated using the `rancher.cattle.io` `ValidatingWebhookConfiguration` and the `rancher.cattle.io` `MutatingWebhookConfiguration` objects, and will override any manual edits.
+
Rancher deploys Rancher-Webhook as a separate deployment and service in both local and downstream clusters. Rancher manages Rancher-Webhook using Helm. It's important to note that Rancher may override modifications made by users to the Helm release. To safely modify these values see [Customizing Rancher-Webhook Configuration](#customizing-rancher-webhook-configuration).
Each Rancher version is designed to be compatible with a single version of the webhook. The compatible versions are provided below for convenience.
@@ -19,10 +20,9 @@ Each Rancher version is designed to be compatible with a single version of the w
| Rancher Version | Webhook Version | Availability in Prime | Availability in Community |
|-----------------|-----------------|-----------------------|---------------------------|
-| v2.8.3 | v0.4.3 | ✓ | ✓ |
-| v2.8.2 | v0.4.2 | ✓ | ✓ |
-| v2.8.1 | v0.4.2 | ✓ | ✓ |
-| v2.8.0 | v0.4.2 | ✗ | ✓ |
+| v2.9.2 | v0.5.2 | ✓ | ✓ |
+| v2.9.1 | v0.5.1 | ✓ | ✓ |
+| v2.9.0 | v0.5.0 | ✗ | ✓ |
## Why Do We Need It?
@@ -53,6 +53,7 @@ kubectl create -f example.yaml --as=system:serviceaccount:cattle-system:rancher-
## Customizing Rancher-Webhook Configuration
You can add custom Helm values when you install Rancher-Webhook via Helm. During a Helm install of the Rancher-Webhook chart, Rancher checks for custom Helm values. These custom values must be defined in a ConfigMap named `rancher-config`, in the `cattle-system` namespace, under the data key, `rancher-webhook`. The value of this key must be valid YAML.
+
``` yaml
apiVersion: v1
kind: ConfigMap
@@ -71,6 +72,7 @@ Rancher redeploys the Rancher-Webhook chart when changes to the ConfigMap values
### Customizing Rancher-Webhook During Rancher Installation
When you use Helm to install the Rancher chart, you can add custom Helm values to the Rancher-Webhook of the local cluster. All values in the Rancher-Webhook chart are accessible as nested variables under the `webhook` name.
+
These values are synced to the `rancher-config` ConfigMap during installation.
```bash
@@ -135,11 +137,3 @@ The webhook provides extra validations on [namespaces](https://github.com/ranche
If you roll back to Rancher v2.7.5 or earlier, you may see webhook versions that are too recent to be compatible with downstream clusters running pre-v2.7.5 version of Rancher. This may cause various incompatibility issues. For example, project members may be unable to create namespaces. In addition, when you roll back to versions before the webhook was installed in downstream clusters, the webhook may remain installed, which can result in similar incompatibility issues.
To help alleviate these issues, you can run the [adjust-downstream-webhook](https://github.com/rancherlabs/support-tools/tree/master/adjust-downstream-webhook) shell script after roll back. This script selects and installs the proper webhook version (or removes the webhook entirely) for the corresponding Rancher version.
-
-### Project Users Can't Create Namespaces
-
-**Note:** The following affects Rancher v2.7.2 - v2.7.4.
-
-Project users may not be able to create namespaces in projects. This includes project owners. This issue is caused by Rancher automatically upgrading the webhook to a version compatible with a more recent version of Rancher than the one currently installed.
-
-To help alleviate these issues, you can run the [adjust-downstream-webhook](https://github.com/rancherlabs/support-tools/tree/master/adjust-downstream-webhook) shell script after roll back. This script selects and installs the proper webhook version (or removes the webhook entirely) for the corresponding Rancher version.
diff --git a/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md b/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md
index 4d410831bf9..c4dcde046d9 100644
--- a/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md
+++ b/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md
@@ -6,7 +6,7 @@ title: Advanced Options for Docker Installs
-### Custom CA Certificate
+## Custom CA Certificate
If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate.
@@ -30,7 +30,7 @@ docker run -d --restart=unless-stopped \
rancher/rancher:latest
```
-### API Audit Log
+## API Audit Log
The API Audit Log records all the user and system transactions made through Rancher server.
@@ -49,7 +49,7 @@ docker run -d --restart=unless-stopped \
rancher/rancher:latest
```
-### TLS settings
+## TLS settings
To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version:
@@ -65,7 +65,7 @@ Privileged access is [required.](../../getting-started/installation-and-upgrade/
See [TLS settings](../../getting-started/installation-and-upgrade/installation-references/tls-settings.md) for more information and options.
-### Air Gap
+## Air Gap
If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `<REGISTRY.YOURDOMAIN.COM:PORT>` with your private registry URL in front of `rancher/rancher:latest`.
@@ -73,7 +73,7 @@ If you are visiting this page to complete an air gap installation, you must prep
<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:latest
-### Persistent Data
+## Persistent Data
Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`.
@@ -89,7 +89,7 @@ docker run -d --restart=unless-stopped \
Privileged access is [required.](../../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher)
-### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node
+## Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node
In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container.
diff --git a/docs/reference-guides/user-settings/api-keys.md b/docs/reference-guides/user-settings/api-keys.md
index d5ff1d1fb24..634a7396d33 100644
--- a/docs/reference-guides/user-settings/api-keys.md
+++ b/docs/reference-guides/user-settings/api-keys.md
@@ -21,7 +21,7 @@ API Keys are composed of four components:
:::note
-Users may opt to enable [token hashing](../about-the-api/api-tokens.md).
+Users may opt to enable [token hashing](../../api/api-tokens.md).
:::
@@ -35,9 +35,9 @@ Users may opt to enable [token hashing](../about-the-api/api-tokens.md).
The API key won't be valid after expiration. Shorter expiration periods are more secure.
- Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period.
+ Expiration period is bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, the API key is created with max-ttl as the expiration period.
- A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints](../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) for more information.
+ A scope limits the API key so that it only works against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you are able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints](../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-authorized-cluster-endpoint) for more information.
4. Click **Create**.
@@ -49,7 +49,7 @@ Users may opt to enable [token hashing](../about-the-api/api-tokens.md).
## What's Next?
-- Enter your API key information into the application that will send requests to the Rancher API.
+- Enter your API key information into the application that sends requests to the Rancher API.
- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI.
- API keys are used for API calls and [Rancher CLI](../cli-with-rancher/cli-with-rancher.md).
diff --git a/docs/reference-guides/user-settings/user-preferences.md b/docs/reference-guides/user-settings/user-preferences.md
index b784e3bb168..65c801ff89a 100644
--- a/docs/reference-guides/user-settings/user-preferences.md
+++ b/docs/reference-guides/user-settings/user-preferences.md
@@ -41,8 +41,6 @@ Choose how certain information is displayed:
## Confirmation Setting
-_Available as of v2.7.2_
-
Choose whether to ask for confirmation when scaling down node pools.
## Advanced Features
diff --git a/docs/security/security-scan/security-scan.md b/docs/security/security-scan/security-scan.md
deleted file mode 100644
index 8c58771c736..00000000000
--- a/docs/security/security-scan/security-scan.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: Security Scans
----
-
-
- https://ranchermanager.docs.rancher.com/how-to-guides/advanced-user-guides/cis-scan-guides
-
-
-The documentation about CIS security scans has moved [here.](../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)
diff --git a/docs/troubleshooting/other-troubleshooting-tips/dns.md b/docs/troubleshooting/other-troubleshooting-tips/dns.md
index af1108b6f80..b4a6989f6b7 100644
--- a/docs/troubleshooting/other-troubleshooting-tips/dns.md
+++ b/docs/troubleshooting/other-troubleshooting-tips/dns.md
@@ -12,7 +12,7 @@ Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG
Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails.
-### Check if DNS pods are running
+## Check if DNS pods are running
```
kubectl -n kube-system get pods -l k8s-app=kube-dns
@@ -30,7 +30,7 @@ NAME READY STATUS RESTARTS AGE
kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s
```
-### Check if the DNS service is present with the correct cluster-ip
+## Check if the DNS service is present with the correct cluster-ip
```
kubectl -n kube-system get svc -l k8s-app=kube-dns
@@ -41,7 +41,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kube-dns ClusterIP 10.43.0.10 <none> 53/UDP,53/TCP 4m13s
```
-### Check if domain names are resolving
+## Check if domain names are resolving
Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service.
@@ -132,15 +132,15 @@ command terminated with exit code 1
Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`.
-### CoreDNS specific
+## CoreDNS specific
-#### Check CoreDNS logging
+### Check CoreDNS logging
```
kubectl -n kube-system logs -l k8s-app=kube-dns
```
-#### Check configuration
+### Check configuration
CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace.
@@ -148,7 +148,7 @@ CoreDNS configuration is stored in the configmap `coredns` in the `kube-system`
kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}}
```
-#### Check upstream nameservers in resolv.conf
+### Check upstream nameservers in resolv.conf
By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on.
@@ -156,7 +156,7 @@ By default, the configured nameservers on the host (in `/etc/resolv.conf`) will
kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf'
```
-#### Enable query logging
+### Enable query logging
Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place:
@@ -166,9 +166,9 @@ kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log
All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging).
-### kube-dns specific
+## kube-dns specific
-#### Check upstream nameservers in kubedns container
+### Check upstream nameservers in kubedns container
By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`).
diff --git a/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md b/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md
index 106479c0bb7..fc8e957c4af 100644
--- a/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md
+++ b/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md
@@ -10,14 +10,15 @@ For Rancher versions that have `rancher-webhook` installed, certain versions cre
In Rancher v2.6.3 and up, rancher-webhook deployments will automatically renew their TLS certificate when it is within 30 or fewer days of its expiration date. If you are using v2.6.2 or below, there are two methods to work around this issue:
-##### 1. Users with cluster access, run the following commands:
+## 1. Users with Cluster Access, Run the Following Commands:
+
```
kubectl delete secret -n cattle-system cattle-webhook-tls
kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io --ignore-not-found=true rancher.cattle.io
kubectl delete pod -n cattle-system -l app=rancher-webhook
```
-##### 2. Users with no cluster access via `kubectl`:
+## 2. Users with No Cluster Access Via `kubectl`:
1. Delete the `cattle-webhook-tls` secret in the `cattle-system` namespace in the local cluster.
diff --git a/docs/troubleshooting/other-troubleshooting-tips/networking.md b/docs/troubleshooting/other-troubleshooting-tips/networking.md
index d67a1cdb793..92bd7cf56b6 100644
--- a/docs/troubleshooting/other-troubleshooting-tips/networking.md
+++ b/docs/troubleshooting/other-troubleshooting-tips/networking.md
@@ -10,11 +10,12 @@ The commands/steps listed on this page can be used to check networking related i
Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI.
-### Double check if all the required ports are opened in your (host) firewall
+## Double Check if All the Required Ports are Opened in Your (Host) Firewall
Double check if all the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP.
-### Check if overlay network is functioning correctly
+
+## Check if Overlay Network is Functioning Correctly
The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod.
@@ -98,7 +99,7 @@ The `swiss-army-knife` container does not support Windows nodes. It also [does n
6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`.
-### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices
+### Check if MTU is Correctly Configured on Hosts and on Peering/Tunnel Appliances/Devices
When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to:
@@ -107,20 +108,3 @@ When the MTU is incorrectly configured (either on hosts running Rancher, nodes i
* `read tcp: i/o timeout`
See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes.
-
-### Resolved issues
-
-#### Overlay network broken when using Canal/Flannel due to missing node annotations
-
-| | |
-|------------|------------|
-| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) |
-| Resolved in | v2.1.2 |
-
-To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed):
-
-```
-kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name'
-```
-
-If there is no output, the cluster is not affected.
diff --git a/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md b/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md
index d0c3957cf62..25845cdc87d 100644
--- a/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md
+++ b/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md
@@ -10,7 +10,7 @@ The commands/steps listed on this page can be used to check your Rancher Kuberne
Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml`).
-### Check Rancher pods
+## Check Rancher Pods
Rancher pods are deployed as a Deployment in the `cattle-system` namespace.
@@ -31,25 +31,25 @@ rancher-7dbd7875f7-qw7wb 1/1 Running 0 8m x.x.x.x x.x.x.
If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events.
-#### Pod details
+### Pod Details
```
kubectl -n cattle-system describe pods -l app=rancher
```
-#### Pod container logs
+### Pod Container Logs
```
kubectl -n cattle-system logs -l app=rancher
```
-#### Namespace events
+### Namespace Events
```
kubectl -n cattle-system get events
```
-### Check ingress
+## Check Ingress
Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (host address(es) it will be routed to).
@@ -64,7 +64,7 @@ NAME HOSTS ADDRESS PORTS AGE
rancher rancher.yourdomain.com x.x.x.x,x.x.x.x,x.x.x.x 80, 443 2m
```
-### Check ingress controller logs
+## Check Ingress Controller Logs
When accessing your configured Rancher FQDN does not show you the UI, check the ingress controller logging to see what happens when you try to access Rancher:
@@ -72,7 +72,7 @@ When accessing your configured Rancher FQDN does not show you the UI, check the
kubectl -n ingress-nginx logs -l app=ingress-nginx
```
-### Leader election
+## Leader Election
The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `cattle-controllers` Lease in the `kube-system` namespace (in this example, `rancher-dbc7ff869-gvg6k`).
@@ -86,3 +86,27 @@ Example output:
NAME HOLDER AGE
cattle-controllers rancher-dbc7ff869-gvg6k 6h10m
```
+
+### Configuration
+
+_Available as of Rancher 2.8.3_
+
+If the Kubernetes API experiences latency, the Rancher replica holding the leader lock may not be able to renew the lease before the lease becomes invalid, which can be observed in the Rancher logs:
+```
+E0629 04:13:07.293461 34 leaderelection.go:364] Failed to update lock: Put "https://172.17.0.1:443/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/cattle-controllers?timeout=15m0s": context deadline exceeded
+I0629 04:13:07.293594 34 leaderelection.go:280] failed to renew lease kube-system/cattle-controllers: timed out waiting for the condition
+...
+2024/06/29 04:13:10 [FATAL] leaderelection lost for cattle-controllers
+```
+
+To mitigate this, you can set environment variables in the `rancher` Deployment to modify the default parameters for leader election:
+- `CATTLE_ELECTION_LEASE_DURATION`: The [lease duration](https://pkg.go.dev/k8s.io/client-go/tools/leaderelection#LeaderElectionConfig.LeaseDuration). The default value is 45s.
+- `CATTLE_ELECTION_RENEW_DEADLINE`: The [renew deadline](https://pkg.go.dev/k8s.io/client-go/tools/leaderelection#LeaderElectionConfig.RenewDeadline). The default value is 30s.
+- `CATTLE_ELECTION_RETRY_PERIOD`: The [retry period](https://pkg.go.dev/k8s.io/client-go/tools/leaderelection#LeaderElectionConfig.RetryPeriod). The default value is 2s.
+
+Example:
+```
+kubectl -n cattle-system set env deploy/rancher CATTLE_ELECTION_LEASE_DURATION=2m CATTLE_ELECTION_RENEW_DEADLINE=90s CATTLE_ELECTION_RETRY_PERIOD=10s
+```
+This will temporarily increase the lease duration, renew deadline and retry period to 120, 90 and 10 seconds respectively.
+Alternatively, in order to make such changes permanent, these environment variables can be set by [using Helm values](../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#setting-extra-environment-variables) instead.
diff --git a/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md
index cce0e089621..f58fc038255 100644
--- a/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md
+++ b/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md
@@ -10,13 +10,13 @@ The commands/steps listed on this page can be used to check clusters that you ar
Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`)
-### Rancher agents
+## Rancher Agents
Communication to the cluster (Kubernetes API via cattle-cluster-agent) and communication to the nodes is done through Rancher agents.
If the cattle-cluster-agent cannot connect to the configured `server-url`, the cluster will remain in **Pending** state, showing `Waiting for full cluster configuration`.
-#### cattle-node-agent
+### cattle-node-agent
:::note
@@ -49,7 +49,7 @@ Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods:
kubectl -n cattle-system logs -l app=cattle-agent
```
-#### cattle-cluster-agent
+### cattle-cluster-agent
Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts:
diff --git a/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md b/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md
index 6a25ae1565e..adecdecde12 100644
--- a/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md
+++ b/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md
@@ -20,7 +20,7 @@ Now with this feature, a downstream cluster admin should be able to look at the
If the audit logs are shipped off of the cluster, a user of the logging system should be able to identify the user in the external Identity Provider system.
A Rancher Admin should now be able to view Rancher audit logs and follow through to the Kubernetes audit log by using the external Identity Provider username.
-### Feature Description
+## Feature Description
- When Kubernetes Audit logs are enabled on the downstream cluster, in each event that is logged, the external Identity Provider's username is now logged for each request, at the "metadata" level.
- When Rancher API Audit logs are enabled on the Rancher installation, the external Identity Provider's username is also logged now at the `auditLog.level=1` for each request that hits the Rancher API server, including the login requests.
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 938b3a79b3a..32ccc282a6c 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -184,6 +184,11 @@ module.exports = {
current: {
label: 'Latest',
},
+ 2.9: {
+ label: 'v2.9',
+ path: 'v2.9',
+ banner: 'none'
+ },
2.8: {
label: 'v2.8',
path: 'v2.8',
@@ -1717,10 +1722,6 @@ module.exports = {
to: '/v2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates',
from: '/v2.8/pages-for-subheaders/about-rke1-templates'
},
- {
- to: '/v2.8/reference-guides/about-the-api',
- from: '/v2.8/pages-for-subheaders/about-the-api'
- },
{
to: '/v2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters',
from: '/v2.8/pages-for-subheaders/access-clusters'
@@ -2061,10 +2062,6 @@ module.exports = {
to: '/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates',
from: '/pages-for-subheaders/about-rke1-templates'
},
- {
- to: '/reference-guides/about-the-api',
- from: '/pages-for-subheaders/about-the-api'
- },
{
to: '/how-to-guides/new-user-guides/manage-clusters/access-clusters',
from: '/pages-for-subheaders/access-clusters'
@@ -2397,6 +2394,19 @@ module.exports = {
to: '/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods',
from: '/pages-for-subheaders/workloads-and-pods'
}, // Redirects for pages-for-subheaders removal [latest] (end)
+
+ { // Redirects for dashboard#12040 (start)
+ to: '/v2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-google-oauth',
+ from: '/v2.9/admin-settings/authentication/google',
+ },
+ {
+ to: '/v2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides',
+ from: '/v2.9/monitoring-alerting/configuration',
+ },
+ {
+ to: '/v2.9/integrations-in-rancher/monitoring-and-alerting',
+ from: '/v2.9/monitoring-alerting',
+ }, // Redirects for dashboard#12040 (end)
{ // Redirects for dashboard#9970
to: '/v2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences',
from: '/v2.8/cluster-provisioning/rke-clusters/behavior-differences-between-rke1-and-rke2/'
@@ -3452,7 +3462,25 @@ module.exports = {
{
to: '/v2.7/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale',
from: '/v2.7/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher'
+ },
+ // Redirects for restructure from PR #1147 (start)
+ {
+ to: '/v2.8/api/v3-rancher-api-guide',
+ from: ['/v2.8/reference-guides/about-the-api', '/v2.8/pages-for-subheaders/about-the-api']
+ },
+ {
+ to: '/v2.8/api/api-tokens',
+ from: '/v2.8/reference-guides/about-the-api/api-tokens'
+ },
+ {
+ to: '/api/v3-rancher-api-guide',
+ from: ['/reference-guides/about-the-api', '/pages-for-subheaders/about-the-api']
+ },
+ {
+ to: '/api/api-tokens',
+ from: '/reference-guides/about-the-api/api-tokens'
}
+ // Redirects for restructure from PR #1147 (end)
],
},
],
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/api/api-tokens.md b/i18n/zh/docusaurus-plugin-content-docs/current/api/api-tokens.md
new file mode 100644
index 00000000000..cc0d08cbfb1
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/api/api-tokens.md
@@ -0,0 +1,87 @@
+---
+title: API 令牌
+---
+
+默认情况下,某些集群级别的 API 令牌是使用无限期 TTL(`ttl=0`)生成的。换言之,除非你让令牌失效,否则 `ttl=0` 的 API 令牌永远不会过期。令牌不会因为更改密码而失效。
+
+要停用 API 令牌,你可以删除令牌或停用用户账号。
+
+## 删除令牌
+要删除令牌:
+
+1. 转到 `https://<RANCHER_URL>/v3/tokens`,在 Rancher API 视图中查看包含所有令牌的列表。
+
+1. 通过 ID 访问要删除的令牌。例如,`https://<RANCHER_URL>/v3/tokens/kubectl-shell-user-vqkqt`。
+
+1. 单击**删除**。
+
+以下是使用 `ttl=0` 生成的完整令牌列表:
+
+| 令牌 | 描述 |
+| ----------------- | -------------------------------------------------------------------------------------- |
+| `kubeconfig-*` | Kubeconfig 令牌 |
+| `kubectl-shell-*` | 在浏览器中访问 `kubectl` shell |
+| `agent-*` | Agent deployment 令牌 |
+| `compose-token-*` | compose 令牌 |
+| `helm-token-*` | Helm Chart deployment 令牌 |
+| `telemetry-*` | 遥测令牌 |
+| `drain-node-*` | 用于清空的令牌(由于没有原生 Kubernetes API,我们使用 `kubectl` 来清空) |
+
+
+## 在 Kubeconfig 令牌上设置 TTL
+
+管理员可以在 Kubeconfig 令牌上设置全局存活时间 (time-to-live,TTL)。如需更改默认 kubeconfig TTL,你可以导航到全局设置并将 [`kubeconfig-default-token-ttl-minutes`](#kubeconfig-default-token-ttl-minutes) 设置为所需的持续时间(单位:分钟)。[`kubeconfig-default-token-ttl-minutes`](#kubeconfig-default-token-ttl-minutes) 的默认值为 0,表示令牌永不过期。
+
+:::note
+
+除了由 CLI 创建的用于[生成 kubeconfig 令牌](#在生成的-kubeconfig-中禁用令牌)的令牌之外,所有 kubeconfig 令牌都使用此设置。
+
+:::
+
+## 在生成的 Kubeconfig 中禁用令牌
+
+1. 将 `kubeconfig-generate-token` 设置为 `false`。此设置让 Rancher 不再在用户单击下载 kubeconfig 文件时自动生成令牌。如果停用此设置,生成的 kubeconfig 将引用 [Rancher CLI](../reference-guides/cli-with-rancher/kubectl-utility.md#使用-kubectl-和-kubeconfig-令牌进行-ttl-认证) 来检索集群的短期令牌。当这个 kubeconfig 在客户端(例如 `kubectl`)中使用时,你需要安装 Rancher CLI 来完成登录请求。
+
+2. 将 `kubeconfig-token-ttl-minutes` 设置为所需的时长(单位:分钟)。`kubeconfig-token-ttl-minutes` 默认设置为 960(即 16 小时)。
+
+## 令牌哈希
+
+你可以启用令牌哈希,令牌将使用 SHA256 算法进行单向哈希。这是一个不可逆的操作,一旦启用,此功能将无法禁用。在启用功能或在测试环境中评估之前,建议你先进行备份。
+
+要启用令牌哈希,请参阅[本节](../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
+
+此功能将影响所有令牌,包括但不限于以下内容:
+
+- Kubeconfig 令牌
+- 持有者令牌 API 密钥/调用
+- 内部操作使用的令牌
+
+## 令牌设置
+
+以下全局设置会影响 Rancher 令牌的行为:
+
+| 设置 | 描述 |
+| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| [`auth-user-session-ttl-minutes`](#auth-user-session-ttl-minutes) | 用户认证会话令牌的 TTL(单位:分钟)。 |
+| [`kubeconfig-default-token-TTL-minutes`](#kubeconfig-default-token-ttl-minutes) | 默认 TTL,应用于所有 kubeconfig 令牌(除了[由 Rancher CLI 生成的令牌](#在生成的-kubeconfig-中禁用令牌))。**此设置从 2.6.6 版本开始引入。** |
+| [`kubeconfig-token-ttl-minutes`](#kubeconfig-token-ttl-minutes) | 在 CLI 中生成的令牌 TTL。**自 2.6.6 起已弃用,并将在 2.8.0 中删除**。请知悉,`kubeconfig-default-token-TTL-minutes` 将用于所有 kubeconfig 令牌。 |
+| [`auth-token-max-ttl-minutes`](#auth-token-max-ttl-minutes) | 除了由 [`auth-user-session-ttl-minutes`](#auth-user-session-ttl-minutes) 控制的令牌外,所有令牌的最大 TTL。 |
+| [`kubeconfig-generate-token`](#kubeconfig-generate-token) | 如果为 true,则在用户下载 kubeconfig 时自动生成令牌。 |
+
+### auth-user-session-ttl-minutes
+存活时间(TTL)(单位:分钟),用于确定用户身份验证会话令牌的到期时间。过期后,用户将需要登录并获取新令牌。此设置不受 [`auth-token-max-ttl-minutes`](#auth-token-max-ttl-minutes) 的影响。会话令牌是在用户登录 Rancher 时创建的。
+
+### kubeconfig-default-token-TTL-minutes
+存活时间(TTL)(单位:分钟),用于确定 kubeconfig 令牌的到期时间。令牌过期后,API 将拒绝令牌。此设置的值不能大于 [`auth-token-max-ttl-minutes`](#auth-token-max-ttl-minutes) 的值。此设置适用于在请求的 kubeconfig 文件中生成的令牌,不包括[由 Rancher CLI 生成的](#在生成的-kubeconfig-中禁用令牌)令牌。
+**此设置从 2.6.6 版本开始引入**。
+
+### kubeconfig-token-ttl-minutes
+存活时间(TTL)(单位:分钟),用于确定由 CLI 生成的 kubeconfig 令牌的到期时间。当 [`kubeconfig-generate-token`](#kubeconfig-generate-token) 设为 false 时,则由 CLI 生成令牌。令牌过期后,API 将拒绝令牌。此设置的值不能大于 [`auth-token-max-ttl-minutes`](#auth-token-max-ttl-minutes) 的值。
+**自版本 2.6.6 起已弃用,并将在 2.8.0 中删除。请知悉,此设置将被 [`kubeconfig-default-token-TTL-minutes`](#kubeconfig-default-token-ttl-minutes) 的值替换**。
+
+### auth-token-max-ttl-minutes
+身份验证令牌的最大生存时间 (TTL)(单位:分钟)。如果用户尝试创建一个 TTL 大于 `auth-token-max-ttl-minutes` 的令牌,Rancher 会将令牌 TTL 设置为 `auth-token-max-ttl-minutes` 的值。身份验证令牌是为验证 API 请求而创建的。
+**2.6.6 版本更改:适用于所有 kubeconfig 令牌和 API 令牌。**
+
+### kubeconfig-generate-token
+如果设置为 true,则通过 UI 请求的 kubeconfig 将包含一个有效的令牌。如果设置为 false,kubeconfig 将包含一个使用 Rancher CLI 提示用户登录的命令。然后,[CLI 将为用户检索和缓存令牌](../reference-guides/cli-with-rancher/kubectl-utility.md#使用-kubectl-和-kubeconfig-令牌进行-ttl-认证)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/about-the-api/about-the-api.md b/i18n/zh/docusaurus-plugin-content-docs/current/api/v3-rancher-api-guide.md
similarity index 93%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/about-the-api/about-the-api.md
rename to i18n/zh/docusaurus-plugin-content-docs/current/api/v3-rancher-api-guide.md
index f1756eb6e5d..89872d72845 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/about-the-api/about-the-api.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/api/v3-rancher-api-guide.md
@@ -27,7 +27,7 @@ API 有自己的用户界面,你可以从 Web 浏览器访问它。这是查
## 认证
-API 请求必须包含认证信息。认证是通过 [API 密钥](../user-settings/api-keys.md)使用 HTTP 基本认证完成的。API 密钥可以创建新集群并通过 `/v3/clusters/` 访问多个集群。[集群和项目角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)会应用于这些键,并限制账号可以查看的集群和项目以及可以执行的操作。
+API 请求必须包含认证信息。认证是通过 [API 密钥](../reference-guides/user-settings/api-keys.md)使用 HTTP 基本认证完成的。API 密钥可以创建新集群并通过 `/v3/clusters/` 访问多个集群。[集群和项目角色](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)会应用于这些键,并限制账号可以查看的集群和项目以及可以执行的操作。
默认情况下,某些集群级别的 API 令牌是使用无限期 TTL(`ttl=0`)生成的。换言之,除非你让令牌失效,否则 `ttl=0` 的 API 令牌永远不会过期。有关如何使 API 令牌失效的详细信息,请参阅 [API 令牌](api-tokens.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/backups/docker-installs/docker-installs.md b/i18n/zh/docusaurus-plugin-content-docs/current/backups/docker-installs/docker-installs.md
deleted file mode 100644
index 51c3001d777..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/backups/docker-installs/docker-installs.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: 备份和恢复 Docker 安装的 Rancher
----
-
-- [备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)
-- [还原](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/cluster-provisioning/rke-clusters/options/options.md b/i18n/zh/docusaurus-plugin-content-docs/current/cluster-provisioning/rke-clusters/options/options.md
deleted file mode 100644
index 39c332461ce..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/cluster-provisioning/rke-clusters/options/options.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: RKE 集群配置
----
-
-本文已迁移到[此处](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/container-network-interface-providers.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/container-network-interface-providers.md
index ad796bb5c4c..0d9c93e18fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/container-network-interface-providers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/container-network-interface-providers.md
@@ -90,7 +90,7 @@ Kubernetes worker 需要打开 TCP 端口 `6783`(控制端口)、UDP 端口
有关详细信息,请参阅以下页面:
-- [Weave Net 官网](https://www.weave.works/)
+- [Weave Net 官网](https://github.com/weaveworks/weave/blob/master/site/overview.md)
### RKE2 Kubernetes 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features-in-v2.5.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features-in-v2.5.md
index ec1898663b2..0e11d4e793a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features-in-v2.5.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features-in-v2.5.md
@@ -2,11 +2,11 @@
title: Rancher 弃用的功能
---
-### Rancher 的弃用策略是什么?
+## Rancher 的弃用策略是什么?
我们在支持[服务条款](https://rancher.com/support-maintenance-terms)中发布了官方弃用策略。
-### 在哪里可以找到 Rancher 已弃用的功能?
+## 在哪里可以找到 Rancher 已弃用的功能?
Rancher 会在 GitHub 上的[发行说明](https://github.com/rancher/rancher/releases)中公布已弃用的功能。请参阅以下补丁版本了解已弃用的功能:
@@ -20,7 +20,6 @@ Rancher 会在 GitHub 上的[发行说明](https://github.com/rancher/rancher/re
| [2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) | 2022 年 5 月 12 日 |
| [2.6.6](https://github.com/rancher/rancher/releases/tag/v2.6.6) | 2022 年 6 月 30 日 |
+## 如果某个功能标记为弃用,我要怎么做?
-### 如果某个功能标记为弃用,我要怎么做?
-
-如果某个发行版将某功能标记为"Deprecated"(已弃用),该功能仍然可用并受支持,从而允许用户按照常规流程进行升级。在升级到该功能被标记为"已删除"的发行版前,用户/管理员应该计划剥离该功能。对于新部署,我们建议不要使用已弃用的功能。
\ No newline at end of file
+如果某个发行版将某功能标记为"Deprecated"(已弃用),该功能仍然可用并受支持,从而允许用户按照常规流程进行升级。在升级到该功能被标记为"已删除"的发行版前,用户/管理员应该计划剥离该功能。对于新部署,我们建议不要使用已弃用的功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features.md
index b594bd0a42b..121efe550dd 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/deprecated-features.md
@@ -6,11 +6,11 @@ title: Rancher 中已弃用的功能
-### Rancher 的弃用策略是什么?
+## Rancher 的弃用策略是什么?
我们已经在支持的[服务条款](https://rancher.com/support-maintenance-terms)中发布了官方的弃用策略。
-### 在哪里可以了解 Rancher 中已弃用哪些功能?
+## 在哪里可以了解 Rancher 中已弃用哪些功能?
Rancher 将在 GitHub 上发布的 Rancher 的[发版说明](https://github.com/rancher/rancher/releases)中发布已弃用的功能。有关已弃用的功能,请参阅以下的补丁版本:
@@ -21,6 +21,6 @@ Rancher 将在 GitHub 上发布的 Rancher 的[发版说明](https://github.com/
| [2.8.1](https://github.com/rancher/rancher/releases/tag/v2.8.1) | 2024 年 1 月 22 日 |
| [2.8.0](https://github.com/rancher/rancher/releases/tag/v2.8.0) | 2023 年 12 月 6 日 |
-### 当一个功能被标记为弃用我可以得到什么样的预期?
+## 当一个功能被标记为弃用我可以得到什么样的预期?
当功能被标记为“已弃用”时,它依然可用并得到支持,允许按照常规的流程进行升级。一旦升级完成,用户/管理员应开始计划在升级到标记为已移除的版本之前放弃使用已弃用的功能。对于新的部署,建议不要使用已弃用的功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/dockershim.md
index cfab0dfbaf4..cb1d658b131 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/dockershim.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/dockershim.md
@@ -14,19 +14,19 @@ enable_cri_dockerd: true
如果你想使用其他容器运行时,Rancher 也提供使用 Containerd 作为默认运行时的,以边缘为中心的 K3s,和以数据中心为中心的 RKE2 Kubernetes 发行版。即使在 Kubernetes 1.24 删除了树内 Dockershim 之后,你也可以通过 Rancher 升级和管理导入的 RKE2 和 K3s Kubernetes 集群。
-### 常见问题
+## 常见问题
-Q. 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
+Q: 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
对于 RKE,Dockershim 的上游支持从 Kubernetes 1.21 开始。你需要使用 Rancher 2.6 或更高版本才能获取使用 Kubernetes 1.21 的 RKE 的支持。详情请参阅我们的[支持矩阵](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/)。
-Q. 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
+Q: 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
-A. 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
+A: 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/general-faq.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/general-faq.md
index 5cf116534af..cb1fc3858b9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/general-faq.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/general-faq.md
@@ -16,7 +16,7 @@ title: 一般常见问题解答
## 是否可以使用 Rancher 2.x 管理 Azure Kubernetes 服务?
-是的。请参阅我们的[集群管理]((../how-to-guides/new-user-guides/manage-clusters/manage-clusters.md))指南,了解 AKS 上可用的 Rancher 功能,以及相关的 [AKS 的文档](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)
+是的。请参阅我们的[集群管理](../how-to-guides/new-user-guides/manage-clusters/manage-clusters.md)指南,了解 AKS 上可用的 Rancher 功能,以及相关的 [AKS 的文档](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)
## Rancher 是否支持 Windows?
@@ -24,7 +24,7 @@ Rancher 支持 Windows Server 1809 容器。有关如何使用 Windows Worker
## Rancher 是否支持 Istio?
-Rancher 支持 [Istio](../pages-for-subheaders/istio.md)。
+Rancher 支持 [Istio](../integrations-in-rancher/istio/istio.md)。
## Rancher 2.x 是否支持使用 Hashicorp 的 Vault 来存储密文?
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/install-and-configure-kubectl.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/install-and-configure-kubectl.md
index 21c301639b6..2b9764b84f2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/install-and-configure-kubectl.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/install-and-configure-kubectl.md
@@ -4,11 +4,11 @@ title: 安装和配置 kubectl
`kubectl` 是一个 CLI 工具,用于运行 Kubernetes 集群相关的命令。Rancher 2.x 中的许多维护和管理任务都需要它。
-### 安装
+## 安装
请参阅 [kubectl 安装](https://kubernetes.io/docs/tasks/tools/install-kubectl/)将 kubectl 安装到你的操作系统上。
-### 配置
+## 配置
使用 RKE 创建 Kubernetes 集群时,RKE 会在本地目录中创建一个 `kube_config_cluster.yml`,该文件包含使用 `kubectl` 或 `helm` 等工具连接到新集群的凭证。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/rancher-is-no-longer-needed.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/rancher-is-no-longer-needed.md
index ffb98927c69..0ae8e7be37b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/rancher-is-no-longer-needed.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/rancher-is-no-longer-needed.md
@@ -4,20 +4,19 @@ title: 卸载 Rancher
本文介绍了如果你不再需要 Rancher、不想再由 Rancher 管理集群、或想删除 Rancher Server 需要怎么做。
-
-### 如果 Rancher Server 被删除,下游集群中的工作负载会怎样?
+## 如果 Rancher Server 被删除,下游集群中的工作负载会怎样?
如果 Rancher 删除了或无法恢复,Rancher 管理的下游 Kubernetes 集群中的所有工作负载将继续正常运行。
-### 如果删除了 Rancher Server,该如何访问下游集群?
+## 如果删除了 Rancher Server,该如何访问下游集群?
如果删除了 Rancher,访问下游集群的方式取决于集群的类型和集群的创建方式。总而言之:
- **注册集群**:集群不受影响,你可以注册集群前的方法访问该集群。
- **托管的 Kubernetes 集群**:如果你在 Kubernetes 云提供商(例如 EKS、GKE 或 AKS)中创建集群,你可以继续使用提供商的云凭证来管理集群。
-- **RKE 集群**:要访问 [RKE 集群](../pages-for-subheaders/launch-kubernetes-with-rancher.md),集群必须启用了[授权集群端点(authorized cluster endpoint,ACE)](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点),而且你必须从 Rancher UI 下载了集群的 kubeconfig 文件。RKE 集群默认启用授权集群端点。通过使用此端点,你可以直接使用 kubectl 访问你的集群,而不用通过 Rancher Server 的[认证代理](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-认证代理)进行通信。有关配置 kubectl 以使用授权集群端点的说明,请参阅[使用 kubectl 和 kubeconfig 文件直接访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)。这些集群将使用删除 Rancher 时配置的身份验证快照。
+- **RKE 集群**:要访问 [RKE 集群](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md),集群必须启用了[授权集群端点(authorized cluster endpoint,ACE)](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点),而且你必须从 Rancher UI 下载了集群的 kubeconfig 文件。RKE 集群默认启用授权集群端点。通过使用此端点,你可以直接使用 kubectl 访问你的集群,而不用通过 Rancher Server 的[认证代理](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-认证代理)进行通信。有关配置 kubectl 以使用授权集群端点的说明,请参阅[使用 kubectl 和 kubeconfig 文件直接访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)。这些集群将使用删除 Rancher 时配置的身份验证快照。
-### 如果我不想再使用 Rancher 了该怎么做?
+## 如果我不想再使用 Rancher 了该怎么做?
:::note
@@ -25,7 +24,7 @@ title: 卸载 Rancher
:::
-如果你[在 Kubernetes 集群上安装了 Rancher](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md),你可以使用 [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) 工具删除 Rancher。
+如果你[在 Kubernetes 集群上安装了 Rancher](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md),你可以使用 [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) 工具删除 Rancher。
在高可用 (HA) 模式下卸载 Rancher 还将删除所有 `helm-operation-*` Pod 和以下应用程序:
@@ -40,7 +39,7 @@ title: 卸载 Rancher
移除 Rancher 不会影响导入的集群。有关其他集群类型,请参考[移除 Rancher 后访问下游集群](#如果删除了-rancher-server该如何访问下游集群)。
-### 如果我不想 Rancher 管理我的注册集群该怎么办?
+## 如果我不想 Rancher 管理我的注册集群该怎么办?
如果你在 Rancher UI 中删除了已注册的集群,则该集群将与 Rancher 分离,集群不会发生改变,你可以使用注册集群之前的方法访问该集群。
@@ -52,7 +51,7 @@ title: 卸载 Rancher
**结果**:注册的集群已与 Rancher 分离,并在 Rancher 外正常运行。
-### 如果我不想 Rancher 管理我的 RKE 集群或托管的 Kubernetes 集群该怎么办?
+## 如果我不想 Rancher 管理我的 RKE 集群或托管的 Kubernetes 集群该怎么办?
目前,我们没有将这些集群从 Rancher 中分离出来的功能。在这种情况下,“分离”指的是将 Rancher 组件移除出集群,并独立于 Rancher 管理对集群的访问。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/security.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/security.md
index 0078c58eac7..805cfd72c7f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/security.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/security.md
@@ -5,10 +5,10 @@ title: 安全
**是否有强化指南?**
-强化指南现在位于[安全](../pages-for-subheaders/rancher-security.md)部分。
+强化指南现在位于[安全](../reference-guides/rancher-security/rancher-security.md)部分。
**Rancher Kubernetes 集群 CIS Benchmark 测试的结果是什么?**
-我们已经针对强化的 Rancher Kubernetes 集群运行了 CIS Kubernetes Benchmark 测试。你可以在[安全](../pages-for-subheaders/rancher-security.md)中找到该评估的结果。
+我们已经针对强化的 Rancher Kubernetes 集群运行了 CIS Kubernetes Benchmark 测试。你可以在[安全](../reference-guides/rancher-security/rancher-security.md)中找到该评估的结果。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/technical-items.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/technical-items.md
index 2bc3cfb6bfc..b781a8cebb1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/technical-items.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/technical-items.md
@@ -2,9 +2,10 @@
title: 技术
---
-### 如何重置管理员密码?
+## 如何重置管理员密码?
Docker 安装:
+
```
$ docker exec -ti reset-password
New password for default administrator (user-xxxxx):
@@ -12,6 +13,7 @@ New password for default administrator (user-xxxxx):
```
Kubernetes 安装(Helm):
+
```
$ KUBECONFIG=./kube_config_cluster.yml
$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password
@@ -19,10 +21,10 @@ New password for default administrator (user-xxxxx):
```
+## 我删除/停用了最后一个 admin,该如何解决?
-
-### 我删除/停用了最后一个 admin,该如何解决?
Docker 安装:
+
```
$ docker exec -ti ensure-default-admin
New default administrator (user-xxxxx)
@@ -31,38 +33,40 @@ New password for default administrator (user-xxxxx):
```
Kubernetes 安装(Helm):
+
```
$ KUBECONFIG=./kube_config_cluster.yml
$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin
New password for default administrator (user-xxxxx):
```
-### 如何启用调试日志记录?
+
+## 如何启用调试日志记录?
请参阅[故障排除:日志记录](../troubleshooting/other-troubleshooting-tips/logging.md)。
-### 我的 ClusterIP 不响应 ping,该如何解决?
+## 我的 ClusterIP 不响应 ping,该如何解决?
ClusterIP 是一个虚拟 IP,不会响应 ping。要测试 ClusterIP 是否配置正确,最好的方法是使用 `curl` 访问 IP 和端口并检查它是否响应。
-### 在哪里管理节点模板?
+## 在哪里管理节点模板?
打开你的账号菜单(右上角)并选择`节点模板`。
-### 为什么我的四层负载均衡器处于 `Pending` 状态?
+## 为什么我的四层负载均衡器处于 `Pending` 状态?
-四层负载均衡器创建为 `type: LoadBalancer`。Kubernetes 需要一个可以满足这些请求的云提供商或控制器,否则这些请求将永远处于 `Pending` 状态。有关更多信息,请参阅[云提供商](../pages-for-subheaders/set-up-cloud-providers.md)或[创建外部负载均衡器](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)。
+四层负载均衡器创建为 `type: LoadBalancer`。Kubernetes 需要一个可以满足这些请求的云提供商或控制器,否则这些请求将永远处于 `Pending` 状态。有关更多信息,请参阅[云提供商](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)或[创建外部负载均衡器](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)。
-### Rancher 的状态存储在哪里?
+## Rancher 的状态存储在哪里?
- Docker 安装:在 `rancher/rancher` 容器的嵌入式 etcd 中,位于 `/var/lib/rancher`。
- Kubernetes install:在为运行 Rancher 而创建的 RKE 集群的 etcd 中。
-### 支持的 Docker 版本是如何确定的?
+## 支持的 Docker 版本是如何确定的?
我们遵循上游 Kubernetes 版本验证过的 Docker 版本。如果需要获取验证过的版本,请查看 Kubernetes 版本 CHANGELOG.md 中的 [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies)。
-### 如何访问 Rancher 创建的节点?
+## 如何访问 Rancher 创建的节点?
你可以转到**节点**视图,然后下载用于访问 Rancher 创建的节点的 SSH 密钥。选择要访问的节点并单击行尾 **⋮** 按钮,然后选择**下载密钥**,如下图所示。
@@ -74,14 +78,14 @@ ClusterIP 是一个虚拟 IP,不会响应 ping。要测试 ClusterIP 是否配
$ ssh -i id_rsa user@ip_of_node
```
-### 如何在 Rancher 中自动化任务 X?
+## 如何在 Rancher 中自动化任务 X?
UI 由静态文件组成,并根据 API 的响应工作。换言之,UI 中可以执行的每个操作/任务都可以通过 API 进行自动化。有两种方法可以实现这一点:
* 访问 `https://your_rancher_ip/v3` 并浏览 API 选项。
* 在使用 UI 时捕获 API 调用(通常使用 [Chrome 开发者工具](https://developers.google.com/web/tools/chrome-devtools/#network),但你也可以使用其他工具)。
-### 节点的 IP 地址改变了,该如何恢复?
+## 节点的 IP 地址改变了,该如何恢复?
节点需要配置静态 IP(或使用 DHCP 保留的 IP)。如果节点的 IP 已更改,你必须在集群中删除并重新添加它。删除后,Rancher 会将集群更新为正确的状态。如果集群不再处于 `Provisioning` 状态,则已从集群删除该节点。
@@ -89,11 +93,11 @@ UI 由静态文件组成,并根据 API 的响应工作。换言之,UI 中可
在集群中移除并清理节点时,你可以将节点重新添加到集群中。
-### 如何将其他参数/绑定/环境变量添加到 Rancher 启动的 Kubernetes 集群的 Kubernetes 组件中?
+## 如何将其他参数/绑定/环境变量添加到 Rancher 启动的 Kubernetes 集群的 Kubernetes 组件中?
你可以使用集群选项中的[配置文件](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)选项来添加其他参数/绑定/环境变量。有关详细信息,请参阅 RKE 文档中的[其他参数、绑定和环境变量](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/),或浏览 [Cluster.ymls 示例](https://rancher.com/docs/rke/latest/en/example-yamls/)。
-### 如何检查证书链是否有效?
+## 如何检查证书链是否有效?
使用 `openssl verify` 命令来验证你的证书链:
@@ -134,7 +138,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com
issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA
```
-### 如何在服务器证书中检查 `Common Name` 和 `Subject Alternative Names`?
+## 如何在服务器证书中检查 `Common Name` 和 `Subject Alternative Names`?
虽然技术上仅需要 `Subject Alternative Names` 中有一个条目,但在 `Common Name` 和 `Subject Alternative Names` 中都包含主机名可以最大程度地提高与旧版浏览器/应用程序的兼容性。
@@ -152,7 +156,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS
DNS:rancher.my.org
```
-### 为什么节点发生故障时重新调度一个 pod 需要 5 分钟以上的时间?
+## 为什么节点发生故障时重新调度一个 pod 需要 5 分钟以上的时间?
这是以下默认 Kubernetes 设置的组合导致的:
@@ -171,6 +175,6 @@ Kubernetes 1.13 默认启用 `TaintBasedEvictions` 功能。有关详细信息
* `default-not-ready-toleration-seconds`:表示 `notReady:NoExecute` 的容忍度的 `tolerationSeconds`,该设置默认添加到还没有该容忍度的 pod。
* `default-unreachable-toleration-seconds`:表示 `unreachable:NoExecute` 的容忍度的 `tolerationSeconds`,该设置默认添加到还没有该容忍度的 pod。
-### 我可以在 UI 中使用键盘快捷键吗?
+## 我可以在 UI 中使用键盘快捷键吗?
是的,你可以使用键盘快捷键访问 UI 的大部分内容。要查看快捷方式的概览,请在 UI 任意位置按 `?`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/faq/telemetry.md b/i18n/zh/docusaurus-plugin-content-docs/current/faq/telemetry.md
index 400f6e839ad..8d6f997c443 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/faq/telemetry.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/faq/telemetry.md
@@ -2,11 +2,11 @@
title: 遥测
---
-### 什么是遥测?
+## 什么是遥测?
遥测(Telemetry)收集 Rancher 安装大小、使用的组件版本以及使用功能的汇总信息。Rancher Labs 会使用此信息来改进产品,我们不会与第三方共享此信息。
-### 收集什么信息?
+## 收集什么信息?
我们不会收集任何识别信息(如用户名、密码或用户资源的名称或地址)。
@@ -20,12 +20,12 @@ title: 遥测
- 运行的 Rancher 的镜像名称和版本。
- 此安装的唯一随机标识符。
-### 我可以看到发送的信息吗?
+## 我可以看到发送的信息吗?
如果启用了遥测,你可以转到 `https:///v1-telemetry` 查看当前数据。
如果未启用遥测,则收集数据的进程未运行,因此没有可供查看的内容。
-### 如何打开或关闭它?
+## 如何打开或关闭它?
完成初始设置后,管理员可以转到 UI `全局`中的`设置`页面,单击**编辑**,然后将 `telemetry-opt` 更改为 `in` 或 `out`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
index 8e5c0eca63f..4f6461e85c6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
@@ -8,7 +8,7 @@ title: 在离线环境中升级
:::
-### Rancher Helm 模板选项
+## Rancher Helm 模板选项
使用安装 Rancher 时选择的选项来渲染 Rancher 模板。参考下表来替换每个占位符。Rancher 需要配置为使用私有镜像仓库,以便配置所有 Rancher 启动的 Kubernetes 集群或 Rancher 工具。
@@ -21,7 +21,6 @@ title: 在离线环境中升级
| `` | 你的私有镜像仓库的 DNS 名称。 |
| `` | 在 K8s 集群上运行的 cert-manager 版本。 |
-
### 选项 A:使用默认的自签名证书
```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
index 437bc35815e..d2f716b4038 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
@@ -73,7 +73,7 @@ Rancher 是使用 Kubernetes 的 [Helm](https://helm.sh/) 包管理器安装的
### 1. 添加 Helm Chart 仓库
-执行 `helm repo add` 命令,以添加包含安装 Rancher 的 Chart 的 Helm Chart 仓库。有关如何选择仓库,以及哪个仓库最适合你的用例,请参见[选择 Rancher 版本](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md)。
+执行 `helm repo add` 命令,以添加包含安装 Rancher 的 Chart 的 Helm Chart 仓库。有关如何选择仓库,以及哪个仓库最适合你的用例,请参见[选择 Rancher 版本](../resources/choose-a-rancher-version.md)。
- Latest:建议用于试用最新功能
```
@@ -103,7 +103,7 @@ Rancher Management Server 默认需要 SSL/TLS 配置来保证访问的安全性
:::note
-如果你想在外部终止 SSL/TLS,请参见[外部负载均衡器的 TLS 终止](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)。
+如果你想在外部终止 SSL/TLS,请参见[外部负载均衡器的 TLS 终止](../installation-references/helm-chart-options.md#外部-tls-终止)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
index 04c2d4ff181..a16c142b6bc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
@@ -180,7 +180,7 @@ ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:3187
## 10. 安装 Rancher Helm Chart
-按照[本页](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
+按照[本页](./install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
安装 Rancher 时,使用上一步获取的 DNS 名称作为 Rancher Server 的 URL。它可以作为 Helm 选项传递进来。例如,如果 DNS 名称是 `rancher.my.org`,你需要使用 `--set hostname=rancher.my.org` 选项来运行 Helm 安装命令。
@@ -190,7 +190,7 @@ ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:3187
--set ingress.ingressClassName=nginx
```
-请参阅[Helm 安装命令](install-upgrade-on-a-kubernetes-cluster.md#5-根据你选择的证书选项通过-helm-安装-rancher)了解你的证书选项。
+请参阅[Helm 安装命令](./install-upgrade-on-a-kubernetes-cluster.md#5-根据你选择的证书选项通过-helm-安装-rancher)了解你的证书选项。
在 Rancher v2.7.5 中,如果你打算在集群上使用默认的 GKE Ingress 而不启用 VPC 原生的集群模式,则需要设置以下标志:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
index d5c661c421f..b834e7f6e11 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
@@ -75,7 +75,7 @@ Rancher v2.6.4 将 cluster-api 模块从 v0.4.4 升级到 v1.0.2。反过来,c
1. 在左侧导航栏中,点击 **Rancher 备份 > 还原**。
:::note
- 如果 Rancher Backups 应用不可见,你需要到 **Apps** 的 Charts 页面中安装应用。详情请参见[此处](../../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md#access-charts)。
+ 如果 Rancher Backups 应用不可见,你需要到 **Apps** 的 Charts 页面中安装应用。详情请参见[此处](../../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md#访问-charts)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
index 2912d831f55..b303fc0c1c9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
@@ -4,7 +4,7 @@ title: Rancher Server Kubernetes 集群的问题排查
本文介绍如何对安装在 Kubernetes 集群上的 Rancher 进行故障排除。
-### 相关命名空间
+## 相关命名空间
故障排除主要针对以下 3 个命名空间中的对象:
@@ -12,7 +12,7 @@ title: Rancher Server Kubernetes 集群的问题排查
- `ingress-nginx`:Ingress Controller Pod 和 services。
- `cert-manager`:`cert-manager` Pod。
-### "default backend - 404"
+## "default backend - 404"
很多操作都有可能导致 Ingress Controller 无法将流量转发到你的 Rancher 实例。但是大多数情况下都是由错误的 SSL 配置导致的。
@@ -21,7 +21,7 @@ title: Rancher Server Kubernetes 集群的问题排查
- [Rancher 是否正在运行](#检查-rancher-是否正在运行)
- [证书的 Common Name(CN)是 "Kubernetes Ingress Controller Fake Certificate"](#证书的-cn-是-kubernetes-ingress-controller-fake-certificate)
-### 检查 Rancher 是否正在运行
+## 检查 Rancher 是否正在运行
使用 `kubectl` 检查 `cattle-system` 系统命名空间,并查看 Rancher Pod 的状态是否是 **Running**:
@@ -49,7 +49,7 @@ Events:
Normal Started 11m kubelet, localhost Started container
```
-### 检查 Rancher 日志
+## 检查 Rancher 日志
使用 `kubectl` 列出 Pod:
@@ -66,7 +66,7 @@ pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m
kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh
```
-### 证书的 CN 是 "Kubernetes Ingress Controller Fake Certificate"
+## 证书的 CN 是 "Kubernetes Ingress Controller Fake Certificate"
使用浏览器检查证书的详细信息。如果显示 CN 是 "Kubernetes Ingress Controller Fake Certificate",则说明读取或颁发 SSL 证书时出现了问题。
@@ -76,7 +76,7 @@ kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh
:::
-### 排查 Cert-Manager 颁发的证书(Rancher 或 Let's Encrypt 生成的)问题
+## 排查 Cert-Manager 颁发的证书(Rancher 或 Let's Encrypt 生成的)问题
`cert-manager` 有 3 部分:
@@ -107,7 +107,7 @@ Events:
Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found
```
-### 排查你自己提供的 SSL 证书问题
+## 排查你自己提供的 SSL 证书问题
你的证书直接应用于 `cattle-system` 命名空间中的 Ingress 对象。
@@ -127,7 +127,7 @@ kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-co
W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found
```
-### 没有匹配的 "Issuer"
+## 没有匹配的 "Issuer"
你所选的 SSL 配置要求在安装 Rancher 之前先安装 Cert-Manager,否则会出现以下错误:
@@ -138,18 +138,18 @@ Error: validation failed: unable to recognize "": no matches for kind "Issuer" i
在这种情况下,先安装 Cert-Manager,然后再重新安装 Rancher。
-### Canal Pod 显示 READY 2/3
+## Canal Pod 显示 READY 2/3
此问题的最常见原因是端口 8472/UDP 在节点之间未打开。因此,你可以检查你的本地防火墙、网络路由或安全组。
解决网络问题后,`canal` Pod 会超时并重启以建立连接。
-### nginx-ingress-controller Pod 显示 RESTARTS
+## nginx-ingress-controller Pod 显示 RESTARTS
此问题的最常见原因是 `canal` pod 未能建立覆盖网络。参见 [canal Pod 显示 READY `2/3`](#canal-pod-显示-ready-23) 进行排查。
-### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed)
+## Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed)
此错误的原因可能是:
@@ -171,18 +171,18 @@ $ nc xxx.xxx.xxx.xxx 22
SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10
```
-### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found
+## Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found
`ssh_key_path` 密钥文件无法访问:请确保你已经指定了私钥文件(不是公钥 `.pub`),而且运行 `rke` 命令的用户可以访问该私钥文件。
-### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain
+## Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain
`ssh_key_path` 密钥文件不是访问节点的正确文件:请仔细检查,确保你已为节点指定了正确的 `ssh_key_path` 和连接用户。
-### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys
+## Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys
如需使用加密的私钥,请使用 `ssh-agent` 来使用密码来加载密钥。如果在运行 `rke` 命令的环境中找到 `SSH_AUTH_SOCK` 环境变量,它将自动用于连接到节点。
-### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
+## Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
节点无法通过配置的 `address` 和 `port` 访问。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-references/feature-flags.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-references/feature-flags.md
index 5a022b13135..e14ec8e7d9e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-references/feature-flags.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-references/feature-flags.md
@@ -15,14 +15,14 @@ title: 功能开关
以下是 Rancher 中可用的功能开关列表。如果你是从旧 Rancher 版本升级的,你可能会在 Rancher UI 中看到其他功能,例如 `proxy` 或 `dashboard`(均[已中断](/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md)):
- `continuous-delivery`:允许从 Fleet 中单独禁用 Fleet GitOps。有关详细信息,请参阅[持续交付](../../../how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md)。
-- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md)。
+- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../integrations-in-rancher/fleet/fleet.md)。
- `harvester`:管理 Virtualization Management 页面的访问。用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。有关详细信息,请参阅 [Harvester 集成](../../../integrations-in-rancher/harvester/overview.md)。
- `istio-virtual-service-ui`:启用[可视界面](../../../how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md)来创建、读取、更新和删除 Istio 虚拟服务和目标规则,这些都是 Istio 流量管理功能。
- `legacy`:启用 2.5.x 及更早版本的一组功能,这些功能正逐渐被新的实现淘汰。它们是已弃用以及后续可用于新版本的功能组合。新的 Rancher 安装会默认禁用此标志。如果你从以前版本的 Rancher 升级,此标志会启用。
- `multi-cluster-management`:允许配置和管理多个 Kubernetes 集群。此标志只能在安装时设置。后续无法启用或禁用它。
- `rke1-custom-node-cleanup`:清除已删除的 RKE1 自定义节点。建议你启用此标志,以防止已删除的节点尝试重新加入集群。
- `rke2`:启用配置 RKE2 集群。此标志默认启用。
-- `token-hashing`:启用令牌哈希。启用后,会使用 SHA256 算法对现有 Token 和所有新 Token 进行哈希处理。一旦对 Token 进行哈希处理,就无法撤消操作。此标志在启用后无法禁用。有关详细信息,请参阅 [API 令牌](../../../reference-guides/about-the-api/api-tokens.md#令牌哈希)。
+- `token-hashing`:启用令牌哈希。启用后,会使用 SHA256 算法对现有 Token 和所有新 Token 进行哈希处理。一旦对 Token 进行哈希处理,就无法撤消操作。此标志在启用后无法禁用。有关详细信息,请参阅 [API 令牌](../../../api/api-tokens.md#令牌哈希)。
- `unsupported-storage-drivers`:允许启用非默认启用的存储提供程序和卷插件。有关详细信息,请参阅[允许使用不受支持的存储驱动程序](../../../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)。
下表介绍了 Rancher 中功能开关的可用性和默认值。标记为“GA”的功能已普遍可用:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
index 84f2383eaf7..8a651472811 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
@@ -18,7 +18,7 @@ enable_cri_dockerd: true
如果你想使用其他容器运行时,Rancher 也提供使用 Containerd 作为默认运行时的,以边缘为中心的 K3s,和以数据中心为中心的 RKE2 Kubernetes 发行版。然后,你就可以通过 Rancher 对导入的 RKE2 和 K3s Kubernetes 集群进行升级和管理。
-### 常见问题
+## 常见问题
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
index e4800c2528f..9de7379796d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
@@ -75,7 +75,7 @@ Rancher 的硬件占用空间取决于许多因素,包括:
- 工作负载数量 (例如: Kubernetes 部署,Fleet 部署)。
- 使用模式 (例如:主动使用的功能集合,使用频率,并发用户数量).
-由于存在许多可能随时间变化的影响因素,因此此处列出的要求为适合大多数用例的起点。 然而,你的用例可能有不同的要求。 若你需要对于特定场景的咨询,请[联系 Rancher]((https://rancher.com/contact/)) 以获得进一步指导。
+由于存在许多可能随时间变化的影响因素,因此此处列出的要求为适合大多数用例的起点。 然而,你的用例可能有不同的要求。 若你需要对于特定场景的咨询,请[联系 Rancher](https://rancher.com/contact/) 以获得进一步指导。
特别指出,本页面中的要求基于以下假设的环境提出,包括:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
index a639349d8cf..23612291fc0 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
@@ -24,7 +24,7 @@ Docker 安装适用于想要测试 Rancher 的用户。
选择以下的选项之一:
-### 选项 A:使用 Rancher 默认的自签名证书
+## 选项 A:使用 Rancher 默认的自签名证书
单击展开
@@ -51,7 +51,7 @@ docker run -d --restart=unless-stopped \
-### 选项 B:使用你自己的证书 - 自签名
+## 选项 B:使用你自己的证书 - 自签名
单击展开
@@ -94,7 +94,7 @@ docker run -d --restart=unless-stopped \
-### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
+## 选项 C:使用你自己的证书 - 可信 CA 签名的证书
单击展开
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
index 8978a639b0e..e70759c3f3a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
@@ -21,7 +21,7 @@ Rancher 可以安装在任何 Kubernetes 集群上。为了阅读方便,我们
- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
- **私有镜像仓库**,用于将容器镜像分发到你的主机。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
这些主机会断开互联网链接,但需要能与你的私有镜像仓库连接。
@@ -29,7 +29,7 @@ Rancher 可以安装在任何 Kubernetes 集群上。为了阅读方便,我们
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置外部数据库
+## 2. 配置外部数据库
K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的数据库来运行 Kubernetes。该功能让 Kubernetes 运维更加灵活。你可以根据实际情况选择合适的数据库。
@@ -45,7 +45,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
如需获取配置 K3s 集群数据库的所有可用选项,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
-### 3. 配置负载均衡器
+## 3. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -68,7 +68,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
:::
-### 4. 配置 DNS 记录
+## 4. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
@@ -78,7 +78,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
-### 5. 配置私有镜像仓库
+## 5. 配置私有镜像仓库
Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
@@ -102,21 +102,21 @@ Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的
这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
-### 为什么使用三个节点?
+## 为什么使用三个节点?
在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
这些主机会断开互联网链接,但需要能与你的私有镜像仓库连接。
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -139,7 +139,7 @@ Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的
:::
-### 3. 配置 DNS 记录
+## 3. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
@@ -149,7 +149,7 @@ Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的
有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
-### 4. 配置私有镜像仓库
+## 4. 配置私有镜像仓库
Rancher 支持使用安全的私有镜像仓库进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
@@ -172,15 +172,15 @@ Rancher 支持使用安全的私有镜像仓库进行离线安装。你必须有
:::
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
此主机会断开互联网链接,但需要能与你的私有镜像仓库连接。
-请确保你的节点满足[操作系统,容器,硬件和网络](../../../../pages-for-subheaders/installation-requirements.md)的常规安装要求。
+请确保你的节点满足[操作系统,容器,硬件和网络](../../installation-requirements/installation-requirements.md)的常规安装要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置私有 Docker 镜像仓库
+## 2. 配置私有 Docker 镜像仓库
Rancher 支持使用私有镜像仓库在堡垒服务器中进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
@@ -189,5 +189,5 @@ Rancher 支持使用私有镜像仓库在堡垒服务器中进行离线安装。
-### 后续操作
+## 后续操作
[收集镜像并发布到你的私有镜像仓库](publish-images.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
index d28b59ed745..faed3d842b8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
@@ -4,7 +4,7 @@ title: 4. 安装 Rancher
本文介绍如何在高可用 Kubernetes 安装的离线环境部署 Rancher。离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-### Rancher 特权访问
+## Rancher 特权访问
当 Rancher Server 部署在 Docker 容器中时,容器内会安装一个本地 Kubernetes 集群供 Rancher 使用。为 Rancher 的很多功能都是以 deployment 的方式运行的,而在容器内运行容器是需要特权模式的,因此你需要在安装 Rancher 时添加 `--privileged` 选项。
@@ -116,7 +116,7 @@ curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/re
将获取的 Chart 复制到有权访问 Rancher Server 集群的系统以完成安装。
-##### 1. 安装 Cert-Manager
+#### 1. 安装 Cert-Manager
使用要用于安装 Chart 的选项来安装 cert-manager。记住要设置 `image.repository` 选项,以从你的私有镜像仓库拉取镜像。此操作会创建一个包含 Kubernetes manifest 文件的 `cert-manager` 目录。
@@ -156,7 +156,8 @@ curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/re
-##### 2. 安装 Rancher
+#### 2. 安装 Rancher
+
首先,参见[添加 TLS 密文](../../resources/add-tls-secrets.md)发布证书文件,以便 Rancher 和 Ingress Controller 可以使用它们。
然后,使用 kubectl 为 Rancher 创建命名空间:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
index 24d8ac7d47c..f16dfc23d57 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
@@ -2,13 +2,13 @@
title: 其他安装方式
---
-### 离线安装
+## 离线安装
按照[以下步骤](air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-### Docker 安装
+## Docker 安装
[单节点 Docker 安装](rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
index 7ac7caca50c..8ca4c273e92 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
@@ -4,7 +4,7 @@ title: 3. 安装 Rancher
在前文的操作后,你已经有了一个运行的 RKE 集群,现在可以在其中安装 Rancher 了。出于安全考虑,所有到 Rancher 的流量都必须使用 TLS 加密。在本教程中,你将使用 [cert-manager](https://cert-manager.io/)自动颁发自签名证书。在实际使用情况下,你可使用 Let's Encrypt 或自己的证书。
-### 安装 Helm CLI
+## 安装 Helm CLI
@@ -16,7 +16,7 @@ chmod +x get_helm.sh
sudo ./get_helm.sh
```
-### 安装 cert-manager
+## 安装 cert-manager
添加 cert-manager Helm 仓库:
@@ -59,7 +59,7 @@ kubectl rollout status deployment -n cert-manager cert-manager
kubectl rollout status deployment -n cert-manager cert-manager-webhook
```
-### 安装 Rancher
+## 安装 Rancher
接下来,你可以安装 Rancher 了。首先,添加 Helm 仓库:
@@ -97,7 +97,7 @@ kubectl rollout status deployment -n cattle-system rancher
:::
-### 其他资源
+## 其他资源
以下资源可能对安装 Rancher 有帮助:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
index f60e31b2c3e..88f410f4314 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
@@ -12,13 +12,13 @@ title: '1. 配置基础设施'
这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
-### 为什么使用三个节点?
+## 为什么使用三个节点?
在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
这些主机将通过 HTTP 代理连接到互联网。
@@ -26,7 +26,7 @@ title: '1. 配置基础设施'
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -49,7 +49,7 @@ title: '1. 配置基础设施'
:::
-### 3. 配置 DNS 记录
+## 3. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
@@ -60,5 +60,5 @@ title: '1. 配置基础设施'
有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
-### 后续操作
+## 后续操作
[配置 Kubernetes 集群](install-kubernetes.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
index 34ee707431b..992fd0b55e2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
@@ -4,7 +4,7 @@ title: 证书故障排除
-### 如何确定我的证书格式是否为 PEM?
+## 如何确定我的证书格式是否为 PEM?
你可以通过以下特征识别 PEM 格式:
@@ -48,7 +48,7 @@ VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
-----END PRIVATE KEY-----
```
-### 将 PKCS8 证书密钥转换为 PKCS1
+## 将 PKCS8 证书密钥转换为 PKCS1
如果你使用的是 PKCS8 证书密钥文件,Rancher 将打印以下日志:
@@ -64,7 +64,7 @@ openssl rsa -in key.pem -out convertedkey.pem
你可使用 `convertedkey.pem` 作为 Rancher 证书密钥文件。
-### 添加中间证书的顺序是什么?
+## 添加中间证书的顺序是什么?
添加证书的顺序如下:
@@ -77,7 +77,7 @@ openssl rsa -in key.pem -out convertedkey.pem
-----END CERTIFICATE-----
```
-### 如何验证我的证书链?
+## 如何验证我的证书链?
你可使用 `openssl` 二进制文件来验证证书链。如果命令的输出以 `Verify return code: 0 (ok)` 结尾(参见以下示例),你的证书链是有效的。`ca.pem` 文件必须与你添加到 `rancher/rancher` 容器中的文件一致。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
index c2a63b86a5e..ae91e251294 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
@@ -73,7 +73,7 @@ docker run -d --restart=unless-stopped \
使用 [OpenSSL](https://www.openssl.org/) 或其他方法创建自签名证书。
- 证书文件的格式必须是 PEM。
-- 在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+- 在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](./certificate-troubleshooting.md)。
:::
@@ -107,7 +107,7 @@ docker run -d --restart=unless-stopped \
:::note 先决条件:
- 证书文件的格式必须是 PEM。
-- 在你的证书文件中,包括可信 CA 提供的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+- 在你的证书文件中,包括可信 CA 提供的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](./certificate-troubleshooting.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
index 7dbef6d2cab..06a8d7adfa9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
@@ -147,7 +147,7 @@ docker run -d --volumes-from rancher-data \
rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
@@ -183,7 +183,7 @@ docker run -d --volumes-from rancher-data \
rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
@@ -218,7 +218,7 @@ docker run -d --volumes-from rancher-data \
--no-cacerts
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
#### 选项 D:Let's Encrypt 证书
@@ -255,7 +255,7 @@ docker run -d --volumes-from rancher-data \
--acme-domain
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
@@ -288,7 +288,7 @@ docker run -d --volumes-from rancher-data \
/rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
#### 选项 B:使用你自己的证书 - 自签名
@@ -324,7 +324,7 @@ docker run -d --restart=unless-stopped \
--privileged \
/rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
#### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
@@ -366,7 +366,7 @@ docker run -d --volumes-from rancher-data \
--privileged
/rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/bootstrap-password.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/bootstrap-password.md
index 249b14115a0..6819429dbe5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/bootstrap-password.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/bootstrap-password.md
@@ -6,7 +6,7 @@ Rancher 首次启动时,会为第一个管理员用户随机生成一个密码
如果你在安装过程中没有使用变量来设置引导密码,则会随机生成引导密码。如需了解使用变量设置引导密码的详情,请参见下文。
-### 在 Helm 安装中指定引导密码
+## 在 Helm 安装中指定引导密码
Helm 安装的情况下,你可以使用 `.Values.bootstrapPassword` 在 Helm Chart 值中指定引导密码变量。
@@ -16,7 +16,7 @@ Helm 安装的情况下,你可以使用 `.Values.bootstrapPassword` 在 Helm C
kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}'
```
-### 在 Docker 安装中指定引导密码
+## 在 Docker 安装中指定引导密码
如果 Rancher 是使用 Docker 安装的,你可以通过在 Docker 安装命令中传递 `-e CATTLE_BOOTSTRAP_PASSWORD=password` 来指定引导密码。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
index 7ddfcef2d8e..734ef56e8a5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
@@ -95,7 +95,7 @@ Rancher Helm Chart 版本与 Rancher 版本(即 `appVersion`)对应。添加
-在执行 [Docker 安装](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md)、升级或回滚时,你可以使用 _tags_ 来安装特定版本的 Rancher。
+在执行 [Docker 安装](../other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)、升级或回滚时,你可以使用 _tags_ 来安装特定版本的 Rancher。
### Server 标签
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/resources.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/resources.md
index 2862aba2cd2..ce4989d9d0e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/resources.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/resources.md
@@ -2,19 +2,19 @@
title: 资源
---
-### Docker 安装
+## Docker 安装
[单节点 Docker 安装](../other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
-### 离线安装
+## 离线安装
按照[以下步骤](../other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-### 高级选项
+## 高级选项
安装 Rancher 时,有如下几个可开启的高级选项:每个安装指南中都提供了对应的选项。了解选项详情:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
index 4678487523e..c5eb98f8aaf 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
@@ -176,7 +176,7 @@ kubectl edit -n cattle-system deployment/cattle-cluster-agent
### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
-在 Rancher UI 的[持续交付](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet/overview.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
#### 为什么要执行这一步骤?
@@ -256,7 +256,7 @@ helm ls -n cattle-system
### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
-在 Rancher UI 的[持续交付](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet/overview.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
#### 为什么要执行这一步骤?
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
index 3d01c38aed8..7ea8aaaf817 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
@@ -32,7 +32,7 @@ Rancher 的 Kubernetes 元数据包含 Rancher 用于配置 [RKE 集群](../../h
- 更改 Rancher 用于同步元数据的 URL。适用于要让 Rancher 从本地同步而不是与 GitHub 同步的情况。这在离线环境下非常有用。
- 防止 Rancher 自动同步元数据。这可以防止在 Rancher 中使用新的/不受支持的 Kubernetes 版本。
-### 刷新 Kubernetes 元数据
+## 刷新 Kubernetes 元数据
默认情况下,管理员或具有**管理集群驱动**[全局角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)的用户,可以刷新 Kubernetes 元数据。
@@ -44,7 +44,7 @@ Rancher 的 Kubernetes 元数据包含 Rancher 用于配置 [RKE 集群](../../h
你可以将 `refresh-interval-minutes` 设置为 `0`(见下文),将 Rancher 配置为仅在需要时刷新元数据,并在需要时使用此按钮手动执行元数据刷新。
-### 配置元数据同步
+## 配置元数据同步
:::caution
@@ -70,7 +70,7 @@ RKE 元数据的配置控制 Rancher 同步元数据的频率以及从何处下
但是,如果你有[离线设置](#离线设置)需求,你需要将 Kubernetes 元数据仓库镜像到 Rancher 可用的位置。然后,你需要更改 URL 来指向 JSON 文件的新位置。
-### 离线设置
+## 离线设置
Rancher Server 会定期刷新 `rke-metadata-config` 来下载新的 Kubernetes 版本元数据。有关 Kubernetes 和 Rancher 版本的兼容性表,请参阅[服务条款](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/nodeports.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/nodeports.md
index bf2294d4c06..1023f0da487 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/nodeports.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/nodeports.md
@@ -2,11 +2,11 @@
title: 部署带有 NodePort 的工作负载
---
-### 先决条件
+## 先决条件
你已有一个正在运行的集群,且该集群中有至少一个节点。
-### 1. 部署工作负载
+## 1. 部署工作负载
你可以开始创建你的第一个 Kubernetes [工作负载](https://kubernetes.io/docs/concepts/workloads/)。工作负载是一个对象,其中包含 pod 以及部署应用所需的其他文件和信息。
@@ -36,11 +36,11 @@ title: 部署带有 NodePort 的工作负载
-### 2. 查看应用
+## 2. 查看应用
在**工作负载**页面中,点击工作负载下方的链接。如果 deployment 已完成,你的应用会打开。
-### 注意事项
+## 注意事项
如果使用云虚拟机,你可能无法访问运行容器的端口。这种情况下,你可以使用 `Execute Shell` 在本地主机的 SSH 会话中测试 Nginx。如果可用的话,使用工作负载下方的链接中 `:` 后面的端口号。在本例中,端口号为 `31568`。
@@ -125,11 +125,11 @@ gettingstarted@rancher:~$
```
-### 已完成!
+## 已完成!
恭喜!你已成功通过 NodePort 部署工作负载。
-#### 后续操作
+### 后续操作
使用完沙盒后,你需要清理 Rancher Server 和集群。详情请参见:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
index 86f8017f09e..3beeb02b815 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
@@ -2,11 +2,11 @@
title: 部署带有 Ingress 的工作负载
---
-### 先决条件
+## 先决条件
你已有一个正在运行的集群,且该集群中有至少一个节点。
-### 1. 部署工作负载
+## 1. 部署工作负载
你可以开始创建你的第一个 Kubernetes [工作负载](https://kubernetes.io/docs/concepts/workloads/)。工作负载是一个对象,其中包含 pod 以及部署应用所需的其他文件和信息。
@@ -19,7 +19,7 @@ title: 部署带有 Ingress 的工作负载
1. 点击 **Deployment**。
1. 为工作负载设置**名称**。
1. 在**容器镜像**字段中,输入 `rancher/hello-world`。注意区分大小写。
-1. 在 `Service Type` 点击 **Add Port** 和 `Cluster IP`,并在 **Private Container Port** 字段中输入`80`。你可以将 `Name` 留空或指定名称。通过添加端口,你可以访问集群内外的应用。有关详细信息,请参阅 [Service](../../../pages-for-subheaders/workloads-and-pods.md#services)。
+1. 在 `Service Type` 点击 **Add Port** 和 `Cluster IP`,并在 **Private Container Port** 字段中输入`80`。你可以将 `Name` 留空或指定名称。通过添加端口,你可以访问集群内外的应用。有关详细信息,请参阅 [Service](../../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md#services)。
1. 单击**创建**。
**结果**:
@@ -27,7 +27,7 @@ title: 部署带有 Ingress 的工作负载
* 工作负载已部署。此过程可能需要几分钟。
* 当工作负载完成部署后,它的状态会变为 **Active**。你可以从项目的**工作负载**页面查看其状态。
-### 2. 通过 Ingress 暴露应用
+## 2. 通过 Ingress 暴露应用
现在应用已启动并运行,你需要暴露应用以让其他服务连接到它。
@@ -53,17 +53,17 @@ title: 部署带有 Ingress 的工作负载
**结果**:应用分配到了一个 `sslip.io` 地址并暴露。这可能需要一两分钟。
-### 查看应用
+## 查看应用
在 **Deployments** 页面中,找到你 deployment 的 **endpoint** 列,然后单击一个 endpoint。可用的 endpoint 取决于你添加到 deployment 中的端口配置。如果你看不到随机分配端口的 endpoint,请将你在创建 Ingress 时指定的路径尾附到 IP 地址上。例如,如果你的 endpoint 是 `xxx.xxx.xxx.xxx` 或 `https://xxx.xxx.xxx.xxx`,把它修改为 `xxx.xxx.xxx.xxx/hello` 或 `https://xxx.xxx.xxx.xxx/hello`。
应用将在另一个窗口中打开。
-#### 已完成!
+### 已完成!
恭喜!你已成功通过 Ingress 部署工作负载。
-#### 后续操作
+### 后续操作
使用完沙盒后,你需要清理 Rancher Server 和集群。详情请参见:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
index b10f0b61f1b..2e9ab992ebf 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
@@ -10,14 +10,14 @@ title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
## 操作系统,Docker,硬件和网络要求
-请确保你的节点满足常规的[安装要求](../../pages-for-subheaders/installation-requirements.md)。
+请确保你的节点满足常规的[安装要求](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。
## 安装概要
## 1. 配置 Linux 主机
-根据我们的[要求](../../pages-for-subheaders/installation-requirements.md)配置一个 Linux 主机来启动 Rancher Server。
+根据我们的[要求](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)配置一个 Linux 主机来启动 Rancher Server。
## 2. 选择一个 SSL 选项并安装 Rancher
@@ -76,11 +76,11 @@ title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
1. 输入以下命令:
- ```
- docker run -d --restart=unless-stopped \
- -p 80:80 -p 443:443 \
- rancher/rancher:latest --no-cacerts
- ```
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ rancher/rancher:latest --no-cacerts
+ ```
@@ -166,9 +166,7 @@ http {
## 后续操作
- **推荐**:检查单节点[备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)和[恢复](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)。你可能暂时没有需要备份的数据,但是我们建议你在常规使用 Rancher 后创建备份。
-- 创建 Kubernetes 集群:[配置 Kubernetes 集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)。
-
-
+- 创建 Kubernetes 集群:[配置 Kubernetes 集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)。
## 常见问题和故障排除
@@ -188,7 +186,7 @@ http {
### 离线环境
-如果你访问此页面是为了完成[离线安装](../../pages-for-subheaders/air-gapped-helm-cli-install.md),则在运行安装命令时,先将你的私有镜像仓库 URL 附加到 Server 标志中。也就是说,在 `rancher/rancher:latest` 前面添加 `` 和私有镜像仓库 URL。
+如果你访问此页面是为了完成[离线安装](../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md),则在运行安装命令时,先将你的私有镜像仓库 URL 附加到 Server 标志中。也就是说,在 `rancher/rancher:latest` 前面添加 `` 和私有镜像仓库 URL。
**示例**:
@@ -208,7 +206,7 @@ docker run -d --restart=unless-stopped \
rancher/rancher:latest
```
-此操作需要 [privileged 访问](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)。
+此操作需要 [privileged 访问](../../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md#rancher-特权访问)。
这个 7 层 NGINX 配置已经在 NGINX 1.13(Mainline)和 1.14(Stable)版本上进行了测试。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-api-audit-log.md
index 94974cb52f7..285c70d5617 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-api-audit-log.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-api-audit-log.md
@@ -20,7 +20,7 @@ API 审计可以在 Rancher 安装或升级期间启用。
| 参数 | 描述 |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `AUDIT_LEVEL` | `0` - 禁用审计日志(默认) `1` - 日志事件元数据 `2` - 日志事件元数据和请求体 `3` - 日志事件元数据,请求体和响应体。请求/响应对的每个日志事务都使用同一个的 `auditID`。 如需了解每个设置记录的日志内容,请参见[审计日志级别](#审核日志级别)。 |
+| `AUDIT_LEVEL` | `0` - 禁用审计日志(默认) `1` - 日志事件元数据 `2` - 日志事件元数据和请求体 `3` - 日志事件元数据,请求体和响应体。请求/响应对的每个日志事务都使用同一个的 `auditID`。 如需了解每个设置记录的日志内容,请参见[审计日志级别](#审核日志级别)。 |
| `AUDIT_LOG_PATH` | Rancher Server API 的日志路径。默认路径:`/var/log/auditlog/rancher-api-audit.log`。你可以将日志目录挂载到主机。 示例:`AUDIT_LOG_PATH=/my/custom/path/` |
| `AUDIT_LOG_MAXAGE` | 旧审计日志文件可保留的最大天数。默认为 10 天。 |
| `AUDIT_LOG_MAXBACKUP` | 保留的审计日志最大文件个数。默认值为 10。 |
@@ -30,7 +30,7 @@ API 审计可以在 Rancher 安装或升级期间启用。
### 审核日志级别
-下表介绍了每个 [`AUDIT_LEVEL`](#audit-level) 记录的 API 事务:
+下表介绍了每个 [`AUDIT_LEVEL`](#api-审计日志选项) 记录的 API 事务:
| `AUDIT_LEVEL` 设置 | 请求元数据 | 请求体 | 响应元数据 | 响应体 |
| --------------------- | ---------------- | ------------ | ----------------- | ------------- |
@@ -59,7 +59,7 @@ kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log
#### 发送审计日志
-你可以为集群启用 Rancher 的内置日志收集和传送功能,将审计日志和其他服务日志发送到支持的 endpoint。详情请参见 [Rancher 工具 - Logging](../../pages-for-subheaders/logging.md)。
+你可以为集群启用 Rancher 的内置日志收集和传送功能,将审计日志和其他服务日志发送到支持的 endpoint。详情请参见 [Rancher 工具 - Logging](../../integrations-in-rancher/logging/logging.md)。
## 审计日志示例
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
index c1f42f70a4b..fdc7139d5ef 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
@@ -2,9 +2,9 @@
title: 持续交付
---
-Rancher 中预装的 [Fleet](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
+Rancher 中预装的 [Fleet](../../../integrations-in-rancher/fleet/fleet.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
-如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
+如需启用或禁用此功能,请参见[启用实验功能主页](./enable-experimental-features.md)中的说明。
| 环境变量键 | 默认值 | 描述 |
---|---|---
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
index 35801de23b4..8e730c24cb3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
@@ -4,9 +4,9 @@ title: UI 管理 Istio 虚拟服务和目标规则
此功能可启动一个 UI,用于管理 Istio 的流量,其中包括创建、读取、更新和删除虚拟服务(Virtual Service)和目标规则(Destination Rule)。
-> **注意**:启用此功能并不会启用 Istio。集群管理员需要[为集群启用 Istio](../../../pages-for-subheaders/istio-setup-guide.md) 才能使用该功能。
+> **注意**:启用此功能并不会启用 Istio。集群管理员需要[为集群启用 Istio](../istio-setup-guide/istio-setup-guide.md) 才能使用该功能。
-如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
+如需启用或禁用此功能,请参见[启用实验功能主页](./enable-experimental-features.md)中的说明。
| 环境变量键 | 默认值 | 状态 | 可用于 |
---|---|---|---
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
index 16ebb15e25a..2981f8e62a9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
@@ -11,7 +11,7 @@ title: "在 ARM64 上运行 Rancher(实验性)"
如果你的节点使用 ARM64 架构,你可以使用以下选项:
- 在 ARM64 架构的节点上运行 Rancher
- - 此选项仅适用于 Docker 安装。请知悉,以下安装命令取代了 [Docker 安装链接](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md)中的示例:
+ - 此选项仅适用于 Docker 安装。请知悉,以下安装命令取代了 [Docker 安装链接](../../../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)中的示例:
```
# 在最后一行 `rancher/rancher:vX.Y.Z` 中,请务必将 "X.Y.Z" 替换为包含 ARM64 版本的发布版本。例如,如果你的匹配版本是 v2.5.8,请在此行填写 `rancher/rancher:v2.5.8`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
index 19b9b27b2e0..7bd9c0cabfb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
@@ -4,13 +4,14 @@ title: 使用非默认支持的存储驱动
此功能允许你使用不是默认启用的存储提供商和卷插件。
-如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
+如需启用或禁用此功能,请参见[启用实验功能主页](./enable-experimental-features.md)中的说明。
| 环境变量键 | 默认值 | 描述 |
---|---|---
| `unsupported-storage-drivers` | `false` | 启用非默认启用的存储提供商和卷插件。 |
-### 默认启用的持久卷插件
+## 默认启用的持久卷插件
+
下表描述了默认启用的存储类型对应的持久卷插件。启用此功能开关时,不在此列表中的任何持久卷插件均被视为实验功能,且不受支持:
| 名称 | 插件 |
@@ -25,7 +26,8 @@ title: 使用非默认支持的存储驱动
| 网络文件系统 | `nfs` |
| hostPath | `host-path` |
-### 默认启用的 StorageClass
+## 默认启用的 StorageClass
+
下表描述了默认启用的 StorageClass 对应的持久卷插件。启用此功能开关时,不在此列表中的任何持久卷插件均被视为实验功能,且不受支持:
| 名称 | 插件 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
index 39c9bdee036..2b7ec8c46b6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
@@ -19,11 +19,11 @@ title: 1. 在集群中启用 Istio
1. 如果你还没有安装 Monitoring 应用,系统会提示你安装 rancher-monitoring。你也可以选择在 Rancher-monitoring 安装上设置选择器或抓取配置选项。
1. 可选:为 Istio 组件配置成员访问和[资源限制](../../../integrations-in-rancher/istio/cpu-and-memory-allocations.md)。确保你的 Worker 节点上有足够的资源来启用 Istio。
1. 可选:如果需要,对 values.yaml 进行额外的配置更改。
-1. 可选:通过[覆盖文件](../../../pages-for-subheaders/configuration-options.md#覆盖文件)来添加其他资源或配置。
+1. 可选:通过[覆盖文件](../../../integrations-in-rancher/istio/configuration-options/configuration-options.md#覆盖文件)来添加其他资源或配置。
1. 单击**安装**。
**结果**:已在集群级别安装 Istio。
## 其他配置选项
-有关配置 Istio 的更多信息,请参阅[配置参考](../../../pages-for-subheaders/configuration-options.md)。
+有关配置 Istio 的更多信息,请参阅[配置参考](../../../integrations-in-rancher/istio/configuration-options/configuration-options.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
index 3651b9d77c6..95313cf2328 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
@@ -19,11 +19,11 @@ title: 2. 在命名空间中启用 Istio
**结果**:命名空间带有了 `istio-injection=enabled` 标签。默认情况下,部署在此命名空间中的所有新工作负载都将注入 Istio sidecar。
-### 验证是否启用了自动 Istio Sidecar 注入
+## 验证是否启用了自动 Istio Sidecar 注入
要验证 Istio 是否已启用,请在命名空间中部署一个 hello-world 工作负载。转到工作负载并单击 pod 名称。在**容器**中,你应该能看到 `istio-proxy` 容器。
-### 排除工作负载的 Istio Sidecar 注入
+## 排除工作负载的 Istio Sidecar 注入
要排除 Istio sidecar 被注入某工作负载,请在工作负载上使用以下注释:
@@ -48,6 +48,6 @@ sidecar.istio.io/inject: “false”
:::
-### 后续步骤
+## 后续步骤
[使用 Istio Sidecar 添加部署](use-istio-sidecar.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
index 9782bd28938..f143dc2cdc6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
@@ -72,5 +72,6 @@ spec:
**结果**:生成流到该服务的流量时(例如,刷新 Ingress Gateway URL),你可以在 Kiali 流量图中看到流到 `reviews` 服务的流量被平均分配到了 `v1` 和 `v3`。
-### 后续步骤
+## 后续步骤
+
[生成和查看流量](generate-and-view-traffic.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
index 2290de911ad..cae09528074 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
@@ -19,7 +19,7 @@ title: 3. 使用 Istio Sidecar 添加部署和服务
等待几分钟,然后工作负载将升级并具有 Istio sidecar。单击它并转到**容器**。你应该能看到该工作负载旁边的 `istio-proxy`。这意味着为工作负载启用了 Istio sidecar。Istio 正在为 Sidecar Envoy 做所有的接线工作。如果你现在在 yaml 中启用它们,Istio 可以自动执行所有功能。
-### 添加部署和服务
+## 添加部署和服务
以下是在命名空间中添加新 **Deployment** 的几种方法:
@@ -46,7 +46,7 @@ title: 3. 使用 Istio Sidecar 添加部署和服务
1. 如果你的文件存储在本地集群中,运行 `kubectl create -f .yaml`。
1. 或运行 `cat<< EOF | kubectl apply -f -`,将文件内容粘贴到终端,然后运行 `EOF` 来完成命令。
-### 部署和服务示例
+## 部署和服务示例
接下来,我们为 Istio 文档中的 BookInfo 应用的示例部署和服务添加 Kubernetes 资源:
@@ -87,7 +87,7 @@ Productpage 服务和部署:
- 一个 `bookinfo-productpage` 的 ServiceAccount。
- 一个 `productpage-v1` Deployment。
-### 资源 YAML
+## 资源 YAML
```yaml
# Copyright 2017 Istio Authors
@@ -356,5 +356,6 @@ spec:
---
```
-### 后续步骤
+## 后续步骤
+
[设置 Istio Gateway](set-up-istio-gateway.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
index 91578148d71..0283b228b92 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
@@ -4,18 +4,18 @@ title: Pod 安全策略
:::note
-本文介绍的集群选项仅适用于 [Rancher 已在其中启动 Kubernetes 的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+本文介绍的集群选项仅适用于 [Rancher 已在其中启动 Kubernetes 的集群](../../new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
你可以在创建项目的时候设置 Pod 安全策略(PSP)。如果在创建项目期间没有为项目分配 PSP,你也随时可以将 PSP 分配给现有项目。
-### 先决条件
+## 先决条件
- 在 Rancher 中创建 Pod 安全策略。在将默认 PSP 分配给现有项目之前,你必须有一个可分配的 PSP。有关说明,请参阅[创建 Pod 安全策略](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md)。
- 将默认 Pod 安全策略分配给项目所属的集群。如果 PSP 还没有应用到集群,你无法将 PSP 分配给项目。有关详细信息,请参阅[将 pod 安全策略添加到集群](../../new-user-guides/manage-clusters/add-a-pod-security-policy.md)。
-### 应用 Pod 安全策略
+## 应用 Pod 安全策略
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,转到需要移动命名空间的集群,然后单击 **Explore**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
index ce57b2db349..f4af6d0c85b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
@@ -14,7 +14,7 @@ title: 项目资源配额
Rancher 中的资源配额包含与 [Kubernetes 原生版本](https://kubernetes.io/docs/concepts/policy/resource-quotas/)相同的功能。Rancher 还扩展了资源配额的功能,从而让你将资源配额应用于项目。有关资源配额如何与 Rancher 中的项目一起使用的详细信息,请参阅[此页面](about-project-resource-quotas.md)。
-### 将资源配额应用于现有项目
+## 将资源配额应用于现有项目
修改资源配额的使用场景如下:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
index f29c3004b85..359dcd7695a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
@@ -12,7 +12,7 @@ title: 覆盖命名空间的默认限制
有关详细信息,请参阅[如何编辑命名空间资源配额](../../../new-user-guides/manage-clusters/projects-and-namespaces.md)。
-### 编辑命名空间资源配额
+## 编辑命名空间资源配额
如果你已为项目配置了资源配额,你可以覆盖命名空间默认限制,从而为特定命名空间提供对更多(或更少)项目资源的访问权限:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
index e56f617e7a0..1735266bd74 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
@@ -6,7 +6,7 @@ title: 设置容器默认资源限制
为了避免在创建工作负载期间对每个容器设置这些限制,可以在命名空间上指定一个默认的容器资源限制。
-### 编辑容器默认资源限制
+## 编辑容器默认资源限制
你可以在以下情况下编辑容器的默认资源限制:
@@ -19,7 +19,7 @@ title: 设置容器默认资源限制
1. 找到要编辑容器默认资源限制的项目。在该项目中选择 **⋮ > 编辑配置**。
1. 展开**容器默认资源限制**并编辑对应的值。
-### 沿用资源限制
+## 沿用资源限制
在项目级别设置默认容器资源限制后,项目中所有新建的命名空间都会沿用这个资源限制参数。新设置的限制不会影响项目中现有的命名空间。你需要为项目中的现有命名空间手动设置默认容器资源限制,以便创建容器时能应用该限制。
@@ -27,7 +27,7 @@ title: 设置容器默认资源限制
在命名空间上配置容器默认资源限制后,在该命名空间中创建的任何容器都会沿用该默认值。你可以在工作负载创建期间覆盖这些限制/预留。
-### 容器资源配额类型
+## 容器资源配额类型
可以配置以下资源限制:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
index 1ce6b33f141..5590a9e5072 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
@@ -26,7 +26,7 @@ _项目_ 是 Rancher 中引入的对象,可帮助你更有组织地管理 Kube
- [配置工具](../../../reference-guides/rancher-project-tools.md)
- [配置 Pod 安全策略](manage-pod-security-policies.md)
-### 授权
+## 授权
非管理者用户只有在[管理员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者或成员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)将非管理员用户添加到项目的**成员**选项卡后,才能获取项目的访问权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
index 5fa126a9089..6dd3a86e7eb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
@@ -4,13 +4,13 @@ title: 自定义 Grafana 仪表板
在本文中,你将学习通过自定义 Grafana 仪表板来显示特定容器的指标。
-### 先决条件
+## 先决条件
在自定义 Grafana 仪表板之前,你必须先安装 `rancher-monitoring` 应用。
要查看指向外部监控 UI(包括 Grafana 仪表板)的链接,你至少需要一个 [project-member 角色](../../../integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#具有-rancher-权限的用户)。
-### 登录 Grafana
+## 登录 Grafana
1. 在 Rancher UI 中,转到要自定义的仪表板的集群。
1. 在左侧导航栏中,单击**监控**。
@@ -18,8 +18,7 @@ title: 自定义 Grafana 仪表板
1. 转到左下角的登录图标,然后单击 **Sign In**。
1. 登录到 Grafana。Grafana 实例的默认 Admin 用户名和密码是 `admin/prom-operator`(无论谁拥有密码,都需要 Rancher 的集群管理员权限才能访问 Grafana 实例)。你还可以在部署或升级 Chart 时替换凭证。
-
-### 获取支持 Grafana 面板的 PromQL 查询
+## 获取支持 Grafana 面板的 PromQL 查询
对于任何面板,你可以单击标题并单击 **Explore** 以获取支持图形的 PromQL 查询。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
index a51a45041ec..40a32dbadab 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
@@ -12,7 +12,7 @@ Prometheus 经过了优化,可以存储基于索引的序列数据。它是为
但是,Prometheus 没有就快速变化的时间序列数量进行对应的优化。因此,如果你在创建和销毁了大量资源的集群(尤其是多租户集群)上安装 Monitoring,可能会出现内存使用量激增的情况。
-### 减少内存激增
+## 减少内存激增
为了减少内存消耗,Prometheus 可以通过抓取更少的指标或在时间序列上添加更少的标签,从而存储更少的时间序列。要查看使用内存最多的序列,你可以查看 Prometheus UI 中的 TSDB(时序数据库)状态页面。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
index c6bac839971..ed318fe4724 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
@@ -4,7 +4,7 @@ title: 启用 Prometheus Federator
## 要求
-默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](../../../../pages-for-subheaders/monitoring-and-alerting.md) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。
+默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](../../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。
有关安装 rancher-monitoring 的说明,请参阅[此页面](../enable-monitoring.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
index 10aaf5d95b7..ce1ad698b1b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
@@ -2,11 +2,11 @@
title: 为工作负载设置 Prometheus Federator
---
-### 显示工作负载的 CPU 和内存指标
+## 显示工作负载的 CPU 和内存指标
使用 Prometheus Federator 显示 CPU 和内存指标的方式与使用 rancher-monitoring 相同。有关说明,请参阅[此处](../set-up-monitoring-for-workloads.md#显示工作负载的-cpu-和内存指标)。
-### 设置 CPU 和内存之外的指标
+## 设置 CPU 和内存之外的指标
使用 Prometheus Federator 设置 CPU 和内存之外的指标与使用 rancher-monitoring 的方式相同。有关说明,请参阅[此处](../set-up-monitoring-for-workloads.md#设置-cpu-和内存之外的指标)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
index 7ab5dd36f79..f81aa879d86 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
@@ -12,13 +12,13 @@ Grafana 显示聚合数据,你也可以使用 PromQL 查询来查看单个工
要为你的工作负载设置自定义指标,你需要设置一个 Exporter 并创建一个新的 ServiceMonitor 自定义资源,从而将 Prometheus 配置为从 Exporter 中抓取指标。
-### 显示工作负载的 CPU 和内存指标
+## 显示工作负载的 CPU 和内存指标
默认情况下,Monitoring 应用会抓取 CPU 和内存指标。
要获取特定工作负载的细粒度信息,你可以自定义 Grafana 仪表板来显示该工作负载的指标。
-### 设置 CPU 和内存之外的指标
+## 设置 CPU 和内存之外的指标
对于自定义指标,你需要使用 Prometheus 支持的格式来公开应用上的指标。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
index 549bc6fa852..388da060f7a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
@@ -6,14 +6,14 @@ title: 高级配置
-### Alertmanager
+## Alertmanager
有关配置 Alertmanager 自定义资源的信息,请参阅[此页面。](alertmanager.md)
-### Prometheus
+## Prometheus
有关配置 Prometheus 自定义资源的信息,请参阅[此页面。](prometheus.md)
-### PrometheusRules
+## PrometheusRules
有关配置 PrometheusRules 自定义资源的信息,请参阅[此页面。](prometheusrules.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
index 458011a703c..8aab20e1b90 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
@@ -10,7 +10,7 @@ PrometheusRule 定义了一组 Prometheus 告警和/或记录规则。
:::
-### 在 Rancher UI 中创建 PrometheusRule
+## 在 Rancher UI 中创建 PrometheusRule
:::note 先决条件:
@@ -28,7 +28,7 @@ PrometheusRule 定义了一组 Prometheus 告警和/或记录规则。
**结果**:告警可以向接收器发送通知。
-### 关于 PrometheusRule 自定义资源
+## 关于 PrometheusRule 自定义资源
当你定义规则时(在 PrometheusRule 资源的 RuleGroup 中声明),[规则本身的规范](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule)会包含标签,然后 Alertmanager 会使用这些标签来确定接收此告警的路由。例如,标签为 `team: front-end` 的告警将发送到与该标签匹配的所有路由。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
index 7e43c7234b2..bc9c0e68048 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
@@ -31,7 +31,7 @@ target prot opt source destination
sudo iptables --list
```
-下文介绍如何使用 `firewalld`,将[防火墙端口规则](../../pages-for-subheaders/installation-requirements.md#端口要求)应用到高可用 Rancher Server 集群中的节点。
+下文介绍如何使用 `firewalld`,将[防火墙端口规则](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#端口要求)应用到高可用 Rancher Server 集群中的节点。
## 先决条件
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
index 6cadea62ced..755f0837752 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
@@ -4,9 +4,9 @@ title: 为大型安装进行 etcd 调优
当你运行具有 15 个或更多集群的大型 Rancher 安装时,我们建议你扩大 etcd 的默认 keyspace(默认为 2GB)。你最大可以将它设置为 8GB。此外,请确保主机有足够的 RAM 来保存整个数据集。如果需要增加这个值,你还需要同步增加主机的大小。如果你预计在垃圾回收间隔期间 Pod 的变化率很高,你也可以在较小的安装中调整 Keyspace 大小。
-Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
+Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
-### 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
+## 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
```yaml
# RKE cluster.yml
@@ -19,7 +19,7 @@ services:
## 扩展 etcd 磁盘性能
-你可以参见 [etcd 文档](https://etcd.io/docs/v3.4.0/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
+你可以参见 [etcd 文档](https://etcd.io/docs/v3.5/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
此外,为了减少 etcd 磁盘上的 IO 争用,你可以为 data 和 wal 目录使用专用设备。etcd 最佳实践不建议配置 Mirror RAID(因为 etcd 在集群中的节点之间复制数据)。你可以使用 striping RAID 配置来增加可用的 IOPS。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/add-users-to-projects.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/add-users-to-projects.md
index 4f4f6712b0e..dfc72d07c33 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/add-users-to-projects.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/add-users-to-projects.md
@@ -12,11 +12,11 @@ title: 添加项目成员
:::
-### 将成员添加到新项目
+## 将成员添加到新项目
你可以在创建项目时将成员添加到项目中(建议)。有关创建新项目的详细信息,请参阅[集群管理](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)。
-### 将成员添加到现有项目
+## 将成员添加到现有项目
创建项目后,你可以将用户添加为项目成员,以便用户可以访问项目的资源:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
index 94b0333f481..cb1637d2bc8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
@@ -8,7 +8,7 @@ title: 配置驱动
使用 Rancher 中的驱动,你可以管理可以使用哪些供应商来部署[托管的 Kubernetes 集群](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)或[云服务器节点](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md),以允许 Rancher 部署和管理 Kubernetes。
-### Rancher 驱动
+## Rancher 驱动
你可以启用或禁用 Rancher 中内置的驱动。如果相关驱动 Rancher 尚未实现,你可以添加自己的驱动。
@@ -17,7 +17,7 @@ Rancher 中有两种类型的驱动:
* [集群驱动](#集群驱动)
* [主机驱动](#主机驱动)
-### 集群驱动
+## 集群驱动
集群驱动用于配置[托管的 Kubernetes 集群](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md),例如 GKE、EKS、AKS 等。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将显示为为托管 Kubernetes 集群创建集群的选项。默认情况下,Rancher 与几个现有的集群驱动打包在一起,但你也可以创建自定义集群驱动并添加到 Rancher。
@@ -33,7 +33,7 @@ Rancher 中有两种类型的驱动:
* [Huawei CCE](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md)
* [Tencent](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md)
-### 主机驱动
+## 主机驱动
主机驱动用于配置主机,Rancher 使用这些主机启动和管理 Kubernetes 集群。主机驱动与 [Docker Machine 驱动](https://docs.docker.com/machine/drivers/)相同。创建主机模板时可以显示的主机驱动,是由主机驱动的状态定义的。只有 `active` 主机驱动将显示为创建节点模板的选项。默认情况下,Rancher 与许多现有的 Docker Machine 驱动打包在一起,但你也可以创建自定义主机驱动并添加到 Rancher。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
index 5f9c30227ba..8be4aa59fbb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
@@ -1,12 +1,11 @@
---
title: 集群驱动
---
-
-集群驱动用于在[托管 Kubernetes 提供商](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)(例如 Google GKE)中创建集群。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将作为创建集群的选项显示。默认情况下,Rancher 与多个现有的云提供商集群驱动打包在一起,但你也可以将自定义集群驱动添加到 Rancher。
+集群驱动用于在[托管 Kubernetes 提供商](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)(例如 Google GKE)中创建集群。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将作为创建集群的选项显示。默认情况下,Rancher 与多个现有的云提供商集群驱动打包在一起,但你也可以将自定义集群驱动添加到 Rancher。
如果你不想向用户显示特定的集群驱动,你可以在 Rancher 中停用这些集群驱动,它们将不会作为创建集群的选项出现。
-### 管理集群驱动
+## 管理集群驱动
:::note 先决条件:
@@ -37,6 +36,6 @@ title: 集群驱动
1. 填写**添加集群驱动**表单。然后单击**创建**。
-### 开发自己的集群驱动
+## 开发自己的集群驱动
如果要开发集群驱动并添加到 Rancher,请参考我们的[示例](https://github.com/rancher-plugins/kontainer-engine-driver-example)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
index bb49bcbe46a..f228428fdcd 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
@@ -6,7 +6,7 @@ title: 主机驱动
如果你不想向用户显示特定的主机驱动,则需要停用这些主机驱动。
-#### 管理主机驱动
+## 管理主机驱动
:::note 先决条件:
@@ -36,6 +36,6 @@ title: 主机驱动
1. 在**主机驱动**选项卡上,单击**添加主机驱动**。
1. 填写**添加主机驱动**表单。然后单击**创建**。
-### 开发自己的主机驱动
+## 开发自己的主机驱动
主机驱动使用 [Docker Machine](https://docs.docker.com/machine/) 来实现。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
index 947f2a9784c..8d7be6171d8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
@@ -22,7 +22,7 @@ title: 访问和共享
- 公开 RKE 模板,并与 Rancher 设置中的所有用户共享
- 与受信任修改模板的用户共享模板所有权
-### 与特定用户或组共享模板
+## 与特定用户或组共享模板
要允许用户或组使用你的模板创建集群,你可以为他们提供模板的基本**用户**访问权限。
@@ -36,7 +36,7 @@ title: 访问和共享
**结果**:用户或组可以使用模板创建集群。
-### 与所有用户共享模板
+## 与所有用户共享模板
1. 在左上角,单击 **☰ > 集群管理**。
1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
@@ -46,7 +46,7 @@ title: 访问和共享
**结果**:Rancher 设置中的所有用户都可以使用该模板创建集群。
-### 共享模板所有权
+## 共享模板所有权
如果你是模板的创建者,你可能希望将维护和更新模板的责任委派给其他用户或组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
index 3efac3e477e..1e20c57d100 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
@@ -10,10 +10,9 @@ RKE 模板可以应用于新集群。
你无法将集群更改为使用不同的 RKE 模板。你只能将集群更新为同一模板的新版本。
+## 使用 RKE 模板创建集群
-### 使用 RKE 模板创建集群
-
-要使用 RKE 模板添加[由基础设施提供商托管](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的集群,请按照以下步骤操作:
+要使用 RKE 模板添加[由基础设施提供商托管](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的集群,请按照以下步骤操作:
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,单击**创建**并选择基础设施提供商。
@@ -23,11 +22,11 @@ RKE 模板可以应用于新集群。
1. 可选:你可以编辑 RKE 模板所有者在创建模板时标记为**允许用户覆盖**的任何设置。如果你无法更改某些设置,则需要联系模板所有者以获取模板的新修订版。然后,你需要编辑集群来将其升级到新版本。
1. 单击**创建**以启动集群。
-### 更新使用 RKE 模板创建的集群
+## 更新使用 RKE 模板创建的集群
模板所有者创建 RKE 模板时,每个设置在 Rancher UI 中都有一个开关,指示用户是否可以覆盖该设置。
-- 如果某个设置允许用户覆盖,你可以通过[编辑集群](../../../../pages-for-subheaders/cluster-configuration.md)来更新集群中的设置。
+- 如果某个设置允许用户覆盖,你可以通过[编辑集群](../../../../reference-guides/cluster-configuration/cluster-configuration.md)来更新集群中的设置。
- 如果该开关处于关闭状态,则除非集群所有者创建了允许你覆盖这些设置的模板修订版,否则你无法更改这些设置。如果你无法更改某些设置,则需要联系模板所有者以获取模板的新修订版。
如果集群是使用 RKE 模板创建的,你可以编辑集群,来将集群更新为模板的新版本。
@@ -40,7 +39,7 @@ RKE 模板可以应用于新集群。
:::
-### 将现有集群转换为使用 RKE 模板
+## 将现有集群转换为使用 RKE 模板
本节介绍如何使用现有集群创建 RKE 模板。
@@ -56,4 +55,4 @@ RKE 模板可以应用于新集群。
- 创建了一个新的 RKE 模板。
- 将集群转换为使用该新模板。
-- 可以[使用新模板创建新集群](apply-templates.md#使用-rke-模板创建集群)。
\ No newline at end of file
+- 可以[使用新模板创建新集群](#使用-rke-模板创建集群)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
index a3cb27d9b6b..e430820063d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
@@ -8,11 +8,11 @@ title: RKE 模板和基础设施
如果要标准化集群中的硬件,请将 RKE 模板与节点模板或服务器配置工具 (如 Terraform) 结合使用。
-### 节点模板
+## 节点模板
[节点模板](../../../../reference-guides/user-settings/manage-node-templates.md)负责 Rancher 中的节点配置和节点预配。你可以在用户配置文件中设置节点模板,从而定义在每个节点池中使用的模板。启用节点池后,可以确保每个节点池中都有所需数量的节点,并确保池中的所有节点都相同。
-### Terraform
+## Terraform
Terraform 是一个服务器配置工具。它使用基础架构即代码,支持使用 Terraform 配置文件创建几乎所有的基础设施。它可以自动执行服务器配置,这种方式是自文档化的,并且在版本控制中易于跟踪。
@@ -21,14 +21,13 @@ Terraform 是一个服务器配置工具。它使用基础架构即代码,支
Terraform 支持:
- 定义几乎任何类型的基础架构即代码,包括服务器、数据库、负载均衡器、监控、防火墙设置和 SSL 证书
-- 使用应用商店应用和多集群应用
- 跨多个平台(包括 Rancher 和主要云提供商)对基础设施进行编码
- 将基础架构即代码提交到版本控制
- 轻松重复使用基础设施的配置和设置
- 将基础架构更改纳入标准开发实践
- 防止由于配置偏移,导致一些服务器的配置与其他服务器不同
-## Terraform 工作原理
+### Terraform 工作原理
Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配置语言编写的。HashiCorp 配置语言是一种声明性语言,支持定义集群中所需的基础设施、正在使用的云提供商以及提供商的凭证。然后 Terraform 向提供商发出 API 调用,以便有效地创建基础设施。
@@ -38,7 +37,7 @@ Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配
如果你需要对基础设施进行更改,你可以在 Terraform 配置文件中进行更改,而不是手动更新服务器。然后,可以将这些文件提交给版本控制、验证,并根据需要进行检查。然后,当你运行 `terraform apply` 时,更改将会被部署。
-## 使用 Terraform 的技巧
+### 使用 Terraform 的技巧
- [Rancher 2 提供商文档](https://www.terraform.io/docs/providers/rancher2/)提供了如何配置集群大部分的示例。
@@ -54,7 +53,7 @@ Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配
本节描述了一种方法,可以使安全合规相关的配置文件成为集群的标准配置文件。
-在你创建[符合 CIS 基准的集群](../../../../pages-for-subheaders/rancher-security.md)时,你有一个加密配置文件和一个审计日志配置文件。
+在你创建[符合 CIS 基准的集群](../../../../reference-guides/rancher-security/rancher-security.md)时,你有一个加密配置文件和一个审计日志配置文件。
你的基础设施预配系统可以将这些文件写入磁盘。然后在你的 RKE 模板中,你需要指定这些文件的位置,然后将你的加密配置文件和审计日志配置文件作为额外的挂载添加到 `kube-api-server`。
@@ -66,4 +65,4 @@ Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配
- [Terraform 文档](https://www.terraform.io/docs/)
- [Rancher2 Terraform Provider 文档](https://www.terraform.io/docs/providers/rancher2/)
-- [The RanchCast - 第 1 集:Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8):在此演示中,社区主管 Jason van Brackel 使用 Rancher 2 Terraform Provider 创建了节点并创建自定义集群。
\ No newline at end of file
+- [The RanchCast - 第 1 集:Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8):在此演示中,社区主管 Jason van Brackel 使用 Rancher 2 Terraform Provider 创建了节点并创建自定义集群。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
index 6e4adb98239..0e2393f74c4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
@@ -10,14 +10,13 @@ title: 创建和修改 RKE 模板
模板所有者对模板修订版具有完全控制权,并且可以创建新的修订版来更新模板,删除或禁用不应被用于创建集群的修订版,和设置默认的模板修订版。
-
-### 先决条件
+## 先决条件
如果你具有**创建 RKE 模板**权限,则可以创建 RKE 模板,该权限可由[管理员授予](creator-permissions.md)。
如果你是模板的所有者,你可以修改、共享和删除模板。有关如何成为模板所有者的详细信息,请参阅[共享模板所有权文档](access-or-share-templates.md#共享模板所有权)。
-### 创建模板
+## 创建模板
1. 在左上角,单击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -26,9 +25,9 @@ title: 创建和修改 RKE 模板
1. 可选:通过将用户添加为成员,来[与其他用户或组共享模板](access-or-share-templates.md#与特定用户或组共享模板)。你还可以将模板公开,从而与 Rancher 中的所有人共享。
1. 然后按照屏幕上的表格将集群配置参数保存为模板修订的一部分。可以将修订标记为此模板的默认值。
-**结果**:配置了具有一个修订版的 RKE 模板。你可以稍后在[配置 Rancher 启动的集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)时使用此 RKE 模板修订版。通过 RKE 模板管理集群后,集群无法解除与模板的绑定,并且无法取消选中**使用现有 RKE 模板和修订版**。
+**结果**:配置了具有一个修订版的 RKE 模板。你可以稍后在[配置 Rancher 启动的集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)时使用此 RKE 模板修订版。通过 RKE 模板管理集群后,集群无法解除与模板的绑定,并且无法取消选中**使用现有 RKE 模板和修订版**。
-### 更新模板
+## 更新模板
更新 RKE 模板相当于创建现有模板的修订版。使用旧版本模板创建的集群可以进行更新,从而匹配新版本。
@@ -44,7 +43,7 @@ title: 创建和修改 RKE 模板
**结果**:模板已更新。要将其应用到使用旧版本模板的集群,请参阅[升级集群以使用新的模板修订版](#升级集群以使用新的模板修订版)。
-### 删除模板
+## 删除模板
当不再需要为任何集群使用某个 RKE 模板时,可以将其删除。
@@ -55,7 +54,7 @@ title: 创建和修改 RKE 模板
**结果**:模板被删除。
-### 基于默认版创建新修订版
+## 基于默认版创建新修订版
你可以复制默认模板修订版并快速更新其设置,而无需从头开始创建新修订版。克隆模板为你省去了重新输入集群创建所需的访问密钥和其他参数的麻烦。
@@ -66,7 +65,7 @@ title: 创建和修改 RKE 模板
**结果**:克隆并配置了 RKE 模板修订版。
-### 基于克隆版创建新修订版
+## 基于克隆版创建新修订版
通过用户设置创建新的 RKE 模板修订版时,可以克隆现有修订版并快速更新其设置,而无需从头开始创建新的修订版。克隆模板修订省去了重新输入集群参数的麻烦。
@@ -77,7 +76,7 @@ title: 创建和修改 RKE 模板
**结果**:克隆并配置了 RKE 模板修订版。你可以在配置集群时使用 RKE 模板修订。任何使用此 RKE 模板的现有集群都可以升级到此新版本。
-### 禁用模板修订版
+## 禁用模板修订版
当你不需要将 RKE 模板修订版本用于创建新集群时,可以禁用模板修订版。你也可以重新启用禁用了的修订版。
@@ -89,7 +88,7 @@ title: 创建和修改 RKE 模板
**结果**:RKE 模板修订版不能用于创建新集群。
-### 重新启用禁用的模板修订版
+## 重新启用禁用的模板修订版
如果要使用已禁用的 RKE 模板修订版来创建新集群,你可以重新启用该修订版。
@@ -99,7 +98,7 @@ title: 创建和修改 RKE 模板
**结果**:RKE 模板修订版可用于创建新集群。
-### 将模板修订版设置为默认
+## 将模板修订版设置为默认
当最终用户使用 RKE 模板创建集群时,他们可以选择使用哪个版本来创建集群。你可以配置默认使用的版本。
@@ -111,7 +110,7 @@ title: 创建和修改 RKE 模板
**结果**:使用模板创建集群时,RKE 模板修订版将用作默认选项。
-### 删除模板修订版
+## 删除模板修订版
你可以删除模板的所有修订(默认修订除外)。
@@ -123,7 +122,7 @@ title: 创建和修改 RKE 模板
**结果**:RKE 模板修订版被删除。
-### 升级集群以使用新的模板修订版
+## 升级集群以使用新的模板修订版
:::note
@@ -142,7 +141,7 @@ title: 创建和修改 RKE 模板
**结果**:集群已升级为使用新模板修订版中定义的设置。
-### 将正在运行的集群导出到新的 RKE 模板和修订版
+## 将正在运行的集群导出到新的 RKE 模板和修订版
你可以将现有集群的设置保存为 RKE 模板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
index b9997d6e7b4..9d2a9590372 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
@@ -42,7 +42,7 @@ Rancher 认证代理可以与以下外部认证服务集成。
## 用户和组
-Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目、多集群应用以及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
+Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
:::note
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
index 704c47a68c5..e6ba881dc5a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
@@ -4,11 +4,11 @@ title: 配置 Active Directory (AD)
如果你的组织使用 Microsoft Active Directory 作为中心用户仓库,你可以将 Rancher 配置为与 Active Directory 服务器通信,从而对用户进行身份验证。这使 Rancher 管理员可以对外部用户系统中的用户和组进行集群和项目的访问控制,同时允许最终用户在登录 Rancher UI 时使用 Active Directory 凭证进行身份验证。
-Rancher 使用 LDAP 与 Active Directory 服务器通信。因此,Active Directory 与 [OpenLDAP 身份验证](../../../../pages-for-subheaders/configure-openldap.md)的流程相同。
+Rancher 使用 LDAP 与 Active Directory 服务器通信。因此,Active Directory 与 [OpenLDAP 身份验证](../configure-openldap/configure-openldap.md)的流程相同。
:::note
-在开始之前,请熟悉[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)的概念。
+在开始之前,请熟悉[外部身份验证配置和主体用户](./authentication-config.md#外部认证配置和用户主体)的概念。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
index 77d51e007bf..45c5a5d12cb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
@@ -43,7 +43,6 @@ Rancher 中的 Microsoft Graph API 流程正在不断发展。建议你使用最

1. 输入 **Name**(例如 `Rancher`)。
-
1. 在 **Supported account types** 中,选择 **Accounts in this organizational directory only (AzureADTest only - Single tenant)**。这对应于旧版应用注册选项。
@@ -260,7 +259,7 @@ Rancher 未测试也未完全支持自定义端点。
#### 离线环境
-在离线环境中,由于 Graph Endpoint URL 正在更改,因此管理员需要确保其端点被[列入白名单](#3.2)。
+在离线环境中,由于 Graph Endpoint URL 正在更改,因此管理员需要确保其端点被[列入白名单](#1-在-azure-注册-rancher)。
#### 回滚迁移
@@ -322,5 +321,5 @@ Rancher 未测试也未完全支持自定义端点。
>
> - 如果你不想在 Azure AD Graph API 停用后升级到 v2.7.0+,你需要:
> - 使用内置的 Rancher 身份认证,或者
-> - 使用另一个第三方身份认证系统并在 Rancher 中进行设置。请参阅[身份验证文档](../../../../pages-for-subheaders/authentication-config.md),了解如何配置其他开放式身份验证提供程序。
+> - 使用另一个第三方身份认证系统并在 Rancher 中进行设置。请参阅[身份验证文档](./authentication-config.md),了解如何配置其他开放式身份验证提供程序。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
index 6f3100e462c..eab27ffb571 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
@@ -8,7 +8,7 @@ title: 配置 FreeIPA
- 你必须配置了 [FreeIPA 服务器](https://www.freeipa.org/)。
- 在 FreeIPA 中创建一个具有 `read-only` 访问权限的 ServiceAccount 。当用户使用 API 密钥发出请求时,Rancher 使用此账号来验证组成员身份。
-- 参见[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+- 参见[外部身份验证配置和主体用户](./authentication-config.md#外部认证配置和用户主体)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
index e903421611d..59eb79748fe 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
@@ -6,7 +6,7 @@ title: 配置 GitHub
:::note 先决条件:
-参见[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+参见[外部身份验证配置和主体用户](./authentication-config.md#外部认证配置和用户主体)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
index 212f7cf45d1..4b9daebde82 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
@@ -96,7 +96,7 @@ OpenLDAP ServiceAccount 用于所有搜索。无论用户个人的 SAML 权限
[配置 OpenLDAP Server、组和用户的设置](../configure-openldap/openldap-config-reference.md)。请注意,不支持嵌套组成员。
-> 在继续配置之前,请熟悉[外部身份认证配置和主要用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+> 在继续配置之前,请熟悉[外部身份认证配置和主要用户](./authentication-config.md#外部认证配置和用户主体)。
1. 使用分配了 [administrator](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions) 角色(即 _本地主体_)的本地用户登录到 Rancher。
1. 在左上角,单击 **☰ > 用户 & 认证**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
index b44cc440312..72dda0595a4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
@@ -4,7 +4,7 @@ title: 用户和组
Rancher 依赖用户和组来决定允许登录到 Rancher 的用户,以及他们可以访问哪些资源。你配置外部身份验证提供程序后,该提供程序的用户将能够登录到你的 Rancher Server。用户登录时,验证提供程序将向你的 Rancher Server 提供该用户所属的组列表。
-你可以通过向资源添加用户或组,来控制其对集群、项目、多集群应用、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
+你可以通过向资源添加用户或组,来控制其对集群、项目、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
## 管理成员
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
index 83bcfee396b..ed1dcf5df1f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
@@ -83,6 +83,6 @@ https:///federationmetadata/2007-06/federationmetadata.xml
**结果**:你已将 Rancher 添加为依赖信任方。现在你可以配置 Rancher 来使用 AD。
-### 后续操作
+## 后续操作
[在 Rancher 中配置 Microsoft AD FS ](configure-rancher-for-ms-adfs.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
index 26e89e2058d..325f86ca45b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
@@ -41,17 +41,13 @@ title: 2. 在 Rancher 中配置 Microsoft AD FS
| UID 字段 | 每个用户独有的 AD 属性。 示例:`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` |
| 用户组字段 | 创建用于管理组成员关系的条目。 示例:`http://schemas.xmlsoap.org/claims/Group` |
| Rancher API 主机 | Rancher Server 的 URL。 |
-| 私钥/证书 | 在 Rancher 和你的 AD FS 之间创建安全外壳(SSH)的密钥/证书对。确保将 Common Name (CN) 设置为 Rancher Server URL。 [证书创建命令](#cert-command) |
+| 私钥/证书 | 在 Rancher 和你的 AD FS 之间创建安全外壳(SSH)的密钥/证书对。确保将 Common Name (CN) 设置为 Rancher Server URL。 [证书创建命令](#证书创建命令示例) |
| 元数据 XML | 从 AD FS 服务器导出的 `federationmetadata.xml` 文件。 你可以在 `https:///federationmetadata/2007-06/federationmetadata.xml` 找到该文件。 |
-
-
-:::tip
+### 证书创建命令示例
你可以使用 openssl 命令生成证书。例如:
```
openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
```
-
-:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
index 7594371a296..652e2457f37 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
@@ -53,4 +53,4 @@ title: 配置 OpenLDAP
## 附录:故障排除
-如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#how-can-i-enable-debug-logging)。
+如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
index 4321f1e6765..ab226898b81 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
@@ -10,14 +10,14 @@ title: Shibboleth 和 OpenLDAP 的组权限
你可以通过配置 OpenLDAP 来解决这个问题。如果让 Shibboleth 使用 OpenLDAP 后端,你将能够在 Rancher 中搜索组,并从 Rancher UI 将集群、项目或命名空间等资源分配给用户组。
-### 名词解释
+## 名词解释
- **Shibboleth**:用于计算机网络和互联网的单点登录系统。它允许用户仅使用一种身份登录到各种系统。它验证用户凭证,但不单独处理组成员身份。
- **SAML**:安全声明标记语言(Security Assertion Markup Language),用于在身份提供程序和服务提供商之间交换认证和授权数据的开放标准。
- **OpenLDAP**:轻型目录访问协议(LDAP)的免费开源实现。它用于管理组织的计算机和用户。OpenLDAP 对 Rancher 用户很有用,因为它支持组。只要组已存在于身份提供程序中,你就可以在 Rancher 中为组分配权限,从而让组访问资源(例如集群,项目或命名空间)。
- **IdP 或 IDP**:身份提供程序。OpenLDAP 是身份提供程序的一个例子。
-### 将 OpenLDAP 组权限添加到 Rancher 资源
+## 将 OpenLDAP 组权限添加到 Rancher 资源
下图说明了 OpenLDAP 组的成员如何访问 Rancher 中该组有权访问的资源。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
index 285a5d3e6aa..fe0c5cde49b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
@@ -101,4 +101,4 @@ SAML 协议不支持用户或用户组的搜索或查找。因此,如果你没
## 故障排除
-如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#how-can-i-enable-debug-logging)。
+如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
index 1136fdabc91..927291ced0b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
@@ -56,9 +56,9 @@ Rancher 内置了三个默认 Pod 安全策略 (PSP),分别是 `restricted-nor
### 要求
-Rancher 只能为[使用 RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)分配 PSP。
+Rancher 只能为[使用 RKE 启动的集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)分配 PSP。
-你必须先在集群级别启用 PSP,然后才能将它们分配给项目。这可以通过[编辑集群](../../../pages-for-subheaders/cluster-configuration.md)来配置。
+你必须先在集群级别启用 PSP,然后才能将它们分配给项目。这可以通过[编辑集群](../../../reference-guides/cluster-configuration/cluster-configuration.md)来配置。
最好的做法是在集群级别设置 PSP。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
index 818ccfc025a..bb7a5210a58 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
@@ -5,7 +5,7 @@ title: 配置全局默认私有镜像仓库
:::note
本页介绍了安装 Rancher 后如何从 Rancher UI 配置全局默认私有镜像仓库。
-有关如何在 Rancher 安装期间设置私有镜像仓库的说明,请参阅[离线安装指南](../../../pages-for-subheaders/air-gapped-helm-cli-install.md)。
+有关如何在 Rancher 安装期间设置私有镜像仓库的说明,请参阅[离线安装指南](../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
index 09b1cec4990..8f293098c0c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
@@ -9,7 +9,7 @@ title: 集群和项目角色
1. 单击 **☰ > 用户 & 认证**。
1. 在左侧导航栏中,单击**角色**并转到**集群**或**项目或命名空间**选项卡。
-### 成员资格和角色分配
+## 成员资格和角色分配
非管理用户可以访问的项目和集群由 _成员资格_ 决定。成员资格是根据该集群或项目中分配的角色而有权访问特定集群或项目的用户列表。每个集群和项目都包含一个选项卡,具有适当权限的用户可以使用该选项卡来管理成员资格。
@@ -21,7 +21,7 @@ title: 集群和项目角色
:::
-### 集群角色
+## 集群角色
_集群角色_ 是你可以分配给用户的角色,以授予他们对集群的访问权限。集群的两个主要角色分别是`所有者`和`成员`。
@@ -33,11 +33,11 @@ _集群角色_ 是你可以分配给用户的角色,以授予他们对集群
可以查看大多数集群级别的资源并创建新项目。
-#### 自定义集群角色
+### 自定义集群角色
Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典型的`所有者`或`成员`角色。这些角色可以是内置的自定义集群角色,也可以是 Rancher 管理员定义的角色。这些角色便于为集群内的普通用户定义更受限或特定的访问权限。有关内置自定义集群角色的列表,请参阅下表。
-#### 集群角色参考
+### 集群角色参考
下表列出了可用的内置自定义集群角色,以及默认的集群级别角色`集群所有者`和`集群成员`是否包含该权限:
@@ -54,7 +54,7 @@ Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典
| 查看集群成员 | ✓ | ✓ |
| 查看节点 | ✓ | ✓ |
-#### 管理节点权限
+### 管理节点权限
下表列出了 RKE 和 RKE2 中`管理节点`角色可用的权限:
@@ -79,7 +79,7 @@ Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典
:::
-### 为集群成员提供自定义集群角色
+## 为集群成员提供自定义集群角色
在管理员[设置自定义集群角色后](custom-roles.md),集群所有者和管理员可以将这些角色分配给集群成员。
@@ -121,7 +121,7 @@ Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典
**结果**:成员具有所分配的角色。
-### 项目角色
+## 项目角色
_项目角色_ 是用于授予用户访问项目权限的角色。主要的项目角色分别是`所有者`、`成员`和`只读`。
@@ -149,11 +149,11 @@ _项目角色_ 是用于授予用户访问项目权限的角色。主要的项
:::
-#### 自定义项目角色
+### 自定义项目角色
Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典型的`所有者`、`成员`或`只读`角色。这些角色可以是内置的自定义项目角色,也可以是 Rancher 管理员定义的角色。这些角色便于为项目内的普通用户定义更受限或特定的访问权限。有关内置自定义项目角色的列表,请参阅下表。
-#### 项目角色参考
+### 项目角色参考
下表列出了 Rancher 中可用的内置自定义项目角色,以及这些角色是否由`所有者`,`成员`或`只读`角色授予的:
@@ -187,12 +187,12 @@ Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典
:::
-### 定义自定义角色
+## 定义自定义角色
如前所述,你可以定义自定义角色,并将这些角色用在集群或项目中。上下文字段定义了角色是否显示在集群成员页面、项目成员页面或同时显示在这两个页面。
定义自定义角色时,你可以授予对特定资源的访问权限,或指定自定义角色应继承的角色。自定义角色可以由特定授权和继承角色组成。所有授权都是累加的。换言之,如果你为特定资源定义更受限的授权,自定义角色继承的角色中定义的更广泛的授权**不会**被覆盖。
-### 默认集群和项目角色
+## 默认集群和项目角色
默认情况下,在普通用户创建新集群或项目时,他们会自动分配到所有者的角色,即[集群所有者](#集群角色)或[项目所有者](#项目角色)。但是,在某些组织中,这些角色可能会被认为有过多的管理访问权限。在这种情况下,你可以将默认角色更改为更具限制性的角色,例如一组单独的角色或一个自定义角色。
@@ -211,7 +211,7 @@ Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典
:::
-### 为集群和项目创建者配置默认角色
+## 为集群和项目创建者配置默认角色
你可以更改为创建集群或项目的用户自动创建的角色:
@@ -226,7 +226,7 @@ Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典
如果要删除默认角色,请编辑权限,并在默认角色选项中选择**否**。
-### 撤销集群成员资格
+## 撤销集群成员资格
如果你撤销一个普通用户的集群成员资格,而且该用户已显式分配集群的集群 _和_ 项目的成员资格,该普通用户将[失去集群角色](#集群角色)但[保留项目角色](#项目角色)。换句话说,即使你已经撤销了用户访问集群和其中的节点的权限,但该普通用户仍然可以:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
index 0ab8560983b..80d740addcc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
@@ -98,7 +98,7 @@ title: 自定义角色
只有在以下情况下,你才能将全局角色分配给组:
-* 你已设置[外部身份验证提供程序](../../../../pages-for-subheaders/authentication-config.md#外部验证与本地验证)。
+* 你已设置[外部身份验证提供程序](../authentication-config/authentication-config.md#外部认证与本地认证)。
* 外部身份验证提供程序支持[用户组](../../authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md)。
* 你已使用身份验证提供程序设置了至少一个用户组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
index 7f7606413f1..7eb2cd0457e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
@@ -254,7 +254,7 @@ inheritedClusterRoles:
只有在以下情况下,你才能将全局角色分配给组:
-- 你已设置[外部认证](../authentication-config/authentication-config.md#external-vs-local-authentication)
+- 你已设置[外部认证](../authentication-config/authentication-config.md#外部认证与本地认证)
- 外部认证服务支持[用户组](../authentication-config/manage-users-and-groups.md)
- 你已使用外部认证服务设置了至少一个用户组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
index 1f56b62762f..7c16ac10192 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
@@ -15,12 +15,12 @@ PSS 定义了工作负载的安全级别。PSA 描述了 Pod 安全上下文和
必须在删除 PodSecurityPolicy 对象_之前_添加新的策略执行机制。否则,你可能会为集群内的特权升级攻击创造机会。
:::
-### 从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies {#remove-psp-rancher-workloads}
+### 从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies
Rancher v2.7.2 提供了 Rancher 维护的 Helm Chart 的新主要版本。v102.x.y 允许你删除与以前的 Chart 版本一起安装的 PSP。这个新版本使用标准化的 `global.cattle.psp.enabled` 开关(默认关闭)替换了非标准的 PSP 开关。
你必须在_仍使用 Kubernetes v1.24_ 时执行以下步骤:
-1. 根据需要配置 PSA 控制器。你可以使用 Rancher 的内置 [PSA 配置模板](#psa-config-templates),或创建自定义模板并将其应用于正在迁移的集群。
+1. 根据需要配置 PSA 控制器。你可以使用 Rancher 的内置 [PSA 配置模板](#pod-安全准入配置模板),或创建自定义模板并将其应用于正在迁移的集群。
1. 将活动的 PSP 映射到 Pod 安全标准:
1. 查看集群中哪些 PSP 仍处于活动状态:
@@ -108,14 +108,14 @@ Helm 尝试在集群中查询存储在先前版本的数据 blob 中的对象时
#### 将 Chart 升级到支持 Kubernetes v1.25 的版本
-清理了具有 PSP 的所有版本后,你就可以继续升级了。对于 Rancher 维护的工作负载,请按照本文档[从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies](#remove-psp-rancher-workloads) 部分中的步骤进行操作。
+清理了具有 PSP 的所有版本后,你就可以继续升级了。对于 Rancher 维护的工作负载,请按照本文档[从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies](#从-rancher-维护的应用程序和市场工作负载中删除-podsecuritypolicies) 部分中的步骤进行操作。
如果工作负载不是由 Rancher 维护的,请参阅对应的提供商的文档。
:::caution
不要跳过此步骤。与 Kubernetes v1.25 不兼容的应用程序不能保证在清理后正常工作。
:::
-## Pod 安全准入配置模板 {#psa-config-templates}
+## Pod 安全准入配置模板
Rancher 提供了 PSA 配置模板。它们是可以应用到集群的预定义安全配置。Rancher 管理员(或具有权限的人员)可以[创建、管理和编辑](./psa-config-templates.md) PSA 模板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
index c3b4d6252f4..5089f8e11d7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
@@ -32,7 +32,7 @@ title: Pod 安全准入 (PSA) 配置模板
### 加固集群
-如果选择 **rancher-restricted** 模板但不选择 **CIS 配置文件**,你将无法满足 CIS Benchmark。有关详细信息,请参阅 [RKE2 加固指南](../../../pages-for-subheaders/rke2-hardening-guide.md)。
+如果选择 **rancher-restricted** 模板但不选择 **CIS 配置文件**,你将无法满足 CIS Benchmark。有关详细信息,请参阅 [RKE2 加固指南](../../../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-hardening-guide.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
index 3ba17f6ed64..3916af6a508 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
@@ -2,7 +2,7 @@
title: 备份集群
---
-在 Rancher UI 中,你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。
+在 Rancher UI 中,你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的 etcd。
Rancher 建议为所有生产集群配置定期 `etcd` 快照。此外,你还可以创建单次快照。
@@ -161,7 +161,7 @@ Rancher 在创建 RKE2 或 K3s 集群的快照时,快照名称是基于快照
选择创建定期快照的频率以及要保留的快照数量。时间的单位是小时。用户可以使用时间戳快照进行时间点恢复。
-默认情况下,[Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)会配置为创建定期快照(保存到本地磁盘)。为防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
+默认情况下,[Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)会配置为创建定期快照(保存到本地磁盘)。为防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
在集群配置或编辑集群期间,可以在**集群选项**的高级部分中找到快照的配置。点击**显示高级选项**。
@@ -179,7 +179,7 @@ Rancher 在创建 RKE2 或 K3s 集群的快照时,快照名称是基于快照
设置创建定期快照的方式以及要保留的快照数量。该计划采用传统的 Cron 格式。保留策略规定了在每个节点上要保留的匹配名称的快照数量。
-默认情况下,[Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)从凌晨 12 点开始每 5 小时创建一次定期快照(保存到本地磁盘)。为了防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
+默认情况下,[Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)从凌晨 12 点开始每 5 小时创建一次定期快照(保存到本地磁盘)。为了防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
在集群配置或编辑集群期间,你可以在**集群配置**下找到快照配置。单击 **etcd**。
@@ -244,12 +244,12 @@ Rancher 支持两种不同的备份目标:
-默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会在本地自动保存到 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中 etcd 节点的 `/opt/rke/etcd-snapshots` 中。所有定期快照都是按照配置的时间间隔创建的。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
+默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会在本地自动保存到 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中 etcd 节点的 `/opt/rke/etcd-snapshots` 中。所有定期快照都是按照配置的时间间隔创建的。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
-默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会自动保存到 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中的本地 etcd 节点上的 `/var/lib/rancher//server/db/snapshots` 中,其中 `` 可以是 `k3s` 或 `rke2`。所有定期快照均按照 Cron 计划进行。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
+默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会自动保存到 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中的本地 etcd 节点上的 `/var/lib/rancher//server/db/snapshots` 中,其中 `` 可以是 `k3s` 或 `rke2`。所有定期快照均按照 Cron 计划进行。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
index cf023f56e0a..b113fda1c5e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
@@ -14,13 +14,13 @@ title: 备份 Rancher
:::
-### 先决条件
+## 先决条件
Rancher 必须是 2.5.0 或更高版本。
请参见[此处](migrate-rancher-to-new-cluster.md#2-使用-restore-自定义资源来还原备份)获取在 Rancher 2.6.3 中将现有备份文件恢复到 v1.22 集群的帮助。
-### 1. 安装 Rancher Backup Operator
+## 1. 安装 Rancher Backup Operator
备份存储位置是 operator 级别的设置,所以需要在安装或升级 `rancher backup` 应用时进行配置。
@@ -36,11 +36,11 @@ Rancher 必须是 2.5.0 或更高版本。
:::note
-使用 `backup-restore` operator 执行恢复后,Fleet 中会出现一个已知问题:用于 `clientSecretName` 和 `helmSecretName` 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../deploy-apps-across-clusters/fleet.md#故障排除)获得解决方法。
+使用 `backup-restore` operator 执行恢复后,Fleet 中会出现一个已知问题:用于 `clientSecretName` 和 `helmSecretName` 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../../../integrations-in-rancher/fleet/overview.md#故障排除)获得解决方法。
:::
-### 2. 执行备份
+## 2. 执行备份
要执行备份,必须创建 Backup 类型的自定义资源。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
index 5aa5ac6e523..7b97ec76ce5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
@@ -4,7 +4,7 @@ title: 将 Rancher 迁移到新集群
如果你要将 Rancher 迁移到一个新的 Kubernetes 集群,先不要在新集群上安装 Rancher。这是因为如果将 Rancher 还原到已安装 Rancher 的新集群,可能会导致问题。
-### 先决条件
+## 先决条件
以下说明假设你已经完成[备份创建](back-up-rancher.md),并且已经安装了用于部署 Rancher 的新 Kubernetes 集群。
@@ -21,7 +21,8 @@ Rancher 可以安装到任意 Kubernetes 集群上,包括托管的 Kubernetes
- [RKE Kubernetes 安装文档](https://rancher.com/docs/rke/latest/en/installation/)
- [K3s Kubernetes 安装文档](https://rancher.com/docs/k3s/latest/en/installation/)
-### 1. 安装 rancher-backup Helm Chart
+## 1. 安装 rancher-backup Helm Chart
+
安装 [rancher-backup chart](https://github.com/rancher/backup-restore-operator/tags),请使用 2.x.x 主要版本内的版本:
1. 添加 helm 仓库:
@@ -55,7 +56,7 @@ Rancher 可以安装到任意 Kubernetes 集群上,包括托管的 Kubernetes
:::
-### 2. 使用 Restore 自定义资源来还原备份
+## 2. 使用 Restore 自定义资源来还原备份
:::note 重要提示:
@@ -150,11 +151,11 @@ Kubernetes v1.22 是 Rancher 2.6.3 的实验功能,不支持使用 apiVersion
1. Restore 资源的状态变成 `Completed` 后,你可以继续安装 cert-manager 和 Rancher。
-### 3. 安装 cert-manager
+## 3. 安装 cert-manager
-按照在 Kubernetes 上安装 cert-manager的步骤[安装 cert-manager](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。
+按照在 Kubernetes 上安装 cert-manager的步骤[安装 cert-manager](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。
-### 4. 使用 Helm 安装 Rancher
+## 4. 使用 Helm 安装 Rancher
使用与第一个集群上使用的相同版本的 Helm 来安装 Rancher:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
index 97e23f81446..fc8066fe879 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
@@ -2,7 +2,7 @@
title: 使用备份恢复集群
---
-你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。
+你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。
Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcdrke)。
@@ -126,4 +126,4 @@ Rancher UI 中提供了集群所有可用快照的列表:
5. 运行修改后的命令。
-6. 在单个节点启动并运行后,Rancher 建议向你的集群添加额外的 etcd 节点。如果你有一个[自定义集群](../../../pages-for-subheaders/use-existing-nodes.md),并且想要复用旧节点,则需要先[清理节点](../manage-clusters/clean-cluster-nodes.md),然后再尝试将它们重新添加到集群中。
+6. 在单个节点启动并运行后,Rancher 建议向你的集群添加额外的 etcd 节点。如果你有一个[自定义集群](../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md),并且想要复用旧节点,则需要先[清理节点](../manage-clusters/clean-cluster-nodes.md),然后再尝试将它们重新添加到集群中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
index eeb6cb521f0..b4331859f7f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
@@ -5,6 +5,7 @@ title: 还原 Rancher
本页概述了如何使用 Rancher 执行恢复。
在以下情况下,请按照本页中的说明进行操作:
+
- 正在运行的 Rancher 实例与备份时的版本相同。
- 上游(本地)集群与备份的位置相同。
@@ -21,7 +22,7 @@ title: 还原 Rancher
:::
-### 创建 Restore 自定义资源
+## 创建 Restore 自定义资源
还原是通过创建 Restore 自定义资源实现的。
@@ -60,7 +61,7 @@ title: 还原 Rancher
2. 集群范围资源
3. 命名空间资源
-### 日志
+## 日志
如需查看还原的处理方式,请检查 Operator 的日志。查看日志的命令如下:
@@ -68,11 +69,11 @@ title: 还原 Rancher
kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f
```
-### 清理
+## 清理
如果你使用 kubectl 创建了 Restore 资源,请删除该资源以防止与未来的还原发生命名冲突。
-### 已知问题
+## 已知问题
在某些情况下,恢复备份后,Rancher 日志会显示类似以下的错误:
```
2021/10/05 21:30:45 [ERROR] error syncing 'c-89d82/m-4067aa68dd78': handler rke-worker-upgrader: clusters.management.cattle.io "c-89d82" not found, requeuing
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md
deleted file mode 100644
index 4d4d465d136..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: 跨集群部署应用
----
-
-
-
-
-
-不同版本的 Rancher 提供了几种不同的方式来部署跨集群应用。
-
-## Fleet
-
-Rancher v2.5 及更高版本使用 Fleet 跨集群部署应用
-
-使用 Fleet 的持续交付是大规模的 GitOps。如需更多信息,请参阅 [Fleet](fleet.md)。
-
-### 多集群应用
-
-在 v2.5 之前的 Rancher 中,多集群应用功能用于跨集群部署应用。多集群应用功能已弃用,但仍可作为旧版功能使用。
-
-详情请参阅[此文档](multi-cluster-apps.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md
deleted file mode 100644
index def223a8337..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: 使用 Feet 进行持续交付
----
-
-使用 Fleet 的持续交付是大规模的 GitOps。你可以使用 Fleet 管理多达一百万个集群。Fleet 非常轻量,可以很好地用于[单个集群](https://fleet.rancher.io/installation#default-install),但是在你达到[大规模](https://fleet.rancher.io/installation#configuration-for-multi-cluster)时,它能发挥更强的实力。此处的大规模指的是大量集群、大量部署、或组织中存在大量团队的情况。
-
-Fleet 是一个独立于 Rancher 的项目,你可以使用 Helm 将它安装在任何 Kubernetes 集群上。
-
-
-## 架构
-
-有关 Fleet 工作原理的信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/architecture.md)。
-
-## 在 Rancher UI 中访问 Fleet
-
-Fleet 预装在 Rancher 中,通过 Rancher UI 中的**持续交付**选项管理。有关持续交付和 Fleet 故障排除技巧的更多信息,请参阅[此处](https://fleet.rancher.io/troubleshooting)。
-
-用户可以通过遵循 **gitops** 的实践,利用持续交付将应用部署到 git 仓库中的 Kubernetes 集群,而无需任何手动操作。
-
-按照以下步骤在 Rancher UI 中访问持续交付:
-
-1. 单击 **☰ > 持续交付**。
-
-1. 在菜单顶部选择你的命名空间,注意以下几点:
- - 默认情况下会选中 `fleet-default`,其中包括注册到 Rancher 的所有下游集群。
- - 你可以切换到仅包含 `local` 集群的 `fleet-local`,或者创建自己的工作空间,并将集群分配和移动到该工作空间。
- - 然后,你可以单击左侧导航栏上的**集群**来管理集群。
-
-1. 单击左侧导航栏上的 **Git 仓库**将 git 仓库部署到当前工作空间中的集群中。
-
-1. 选择你的 [git 仓库](https://fleet.rancher.io/gitrepo-add)和[目标集群/集群组](https://fleet.rancher.io/gitrepo-targets)。你还可以单击左侧导航栏中的**集群组**在 UI 中创建集群组。
-
-1. 部署 git 仓库后,你可以通过 Rancher UI 监控应用。
-
-## Windows 支持
-
-有关对具有 Windows 节点的集群的支持的详细信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/windows-support.md)。
-
-
-## GitHub 仓库
-
-你可以单击此处获取 [Fleet Helm Chart](https://github.com/rancher/fleet/releases/latest)。
-
-
-## 在代理后使用 Fleet
-
-有关在代理后使用 Fleet 的详细信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md)。
-
-## Helm Chart 依赖
-
-由于用户需要完成依赖列表,因此为了成功部署具有依赖项的 Helm Chart,你必须手动运行命令(如下所列)。如果你不这样做,并继续克隆仓库并运行 `helm install`,由于依赖项将丢失,因此你的安装将失败。
-
-git 仓库中的 Helm Chart 必须在 Chart 子目录中包含其依赖项。你必须手动运行 `helm dependencies update $chart`,或在本地运行 `helm dependencies build $chart`,然后将完整的 Chart 目录提交到你的 git 仓库。请注意,你需要使用适当的参数来修改命令。
-
-## 故障排除
-
----
-* **已知问题**:Fleet git 仓库的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-安装-rancher-backup-operator) 创建的备份或恢复中。如果我们有了永久的解决方案,我们将通知社区。
-
-* **临时解决方法:**
- 默认情况下,用户定义的密文不会在 Fleet 中备份。如果执行灾难恢复或将 Rancher 迁移到新集群,则需要重新创建密文。要修改 resourceSet 以包含需要备份的其他资源,请参阅[此文档](https://github.com/rancher/backup-restore-operator#user-flow)。
-
----
-
-## 文档
-
-Fleet 文档链接:[https://fleet.rancher.io/](https://fleet.rancher.io/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
deleted file mode 100644
index 113a30ad48b..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
+++ /dev/null
@@ -1,175 +0,0 @@
----
-title: 多集群应用
----
-
-通常,大多数应用都部署在单个 Kubernetes 集群上,但有时你可能需要跨不同集群和/或项目部署同一应用的多个副本。在 Rancher 中,_多集群应用_ 指的是使用 Helm Chart 跨多个集群部署的应用。由于能够跨多个集群部署相同的应用,因此可以避免在每个集群上重复执行相同的应用配置操作而引入的人为错误。使用多集群应用,你可以通过自定义在所有项目/集群中使用相同的配置,并根据你的目标项目更改配置。由于多集群应用被视为单个应用,因此更容易管理和维护。
-
-全局应用商店中的任何 Helm Chart 都可用于部署和管理多集群应用。
-
-创建多集群应用后,你可以对全局 DNS 条目进行编程,以便更轻松地访问应用。
-
-## 先决条件
-
-### 权限
-
-要在 Rancher 中创建多集群应用,你至少需要具有以下权限之一:
-
-- 目标集群中的[项目成员角色](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色),能够创建、读取、更新和删除工作负载
-- 目标项目所在集群的[集群所有者角色](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)
-
-### 启用旧版功能
-
-由于 Rancher 2.5 已弃用多集群应用并使用 Fleet 取代它,你需要使用功能开关以启用多集群应用。
-
-1. 在左上角,单击 **☰ > 全局设置**。
-1. 单击**功能开关**。
-1. 转到 `Legacy` 功能开关并单击**激活**。
-
-## 启动多集群应用
-
-1. 在左上角,单击**☰ > 多集群应用**。
-1. 点击**启动**。
-1. 找到要启动的应用。
-1. (可选)查看来自 Helm Chart `README` 的详细描述。
-1. 在**配置选项**下输入多集群应用的**名称**。默认情况下,此名称还用于在每个[目标项目](#目标)中为多集群应用创建一个 Kubernetes 命名空间。命名空间命名为 `-`。
-1. 选择一个**模板版本**。
-1. 完成[多集群应用配置选项](#多集群应用配置选项)以及[应用配置选项](#应用配置选项)。
-1. 选择可以[与多集群应用交互](#成员)的**成员**。
-1. 添加[自定义应用配置答案](#覆盖特定项目的应用配置选项),这将更改默认应用配置答案中特定项目的配置。
-1. 查看**预览**中的文件。确认后,单击**启动**。
-
-**结果**:应用已部署到所选的命名空间。你可以从项目中查看应用状态。
-
-## 多集群应用配置选项
-
-Rancher 将多集群应用的配置选项分为以下几个部分。
-
-### 目标
-
-在**目标**部分中,选择用于部署应用的项目。项目列表仅显示你有权访问的项目。所选的每个项目都会被添加到列表中,其中显示了所选的集群名称和项目名称。要移除目标项目,单击 **-**。
-
-### 升级
-
-在**升级**部分中,选择升级应用时需要使用的升级策略。
-
-* **滚动更新(批量)**:选择此升级策略时,每次升级的应用数量取决于选择的**批量大小**和**间隔**(多少秒后才开始下一批更新)。
-
-* **同时升级所有应用**:选择此升级策略时,所有项目的所有应用都将同时升级。
-
-### 角色
-
-在**角色**中,你可以定义多集群应用的角色。通常,当用户[启动商店应用](../../../pages-for-subheaders/helm-charts-in-rancher.md)时,该用户的权限会用于创建应用所需的所有工作负载/资源。
-
-多集群应用由 _系统用户_ 部署,系统用户还被指定为所有底层资源的创建者。由于实际用户可以从某个目标项目中删除,因此使用 _系统用户_ 而不是实际用户。如果实际用户从其中一个项目中删除,则该用户将不再能够管理其他项目的应用。
-
-Rancher 允许你选择**项目**或**集群**的角色选项。Rancher 将允许你根据用户的权限使用其中一个角色进行创建。
-
-- **项目** - 相当于[项目成员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。如果你选择此角色,Rancher 将检查用户是否在所有目标项目中至少具有[项目成员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)的角色。虽然用户可能没有被明确授予 _项目成员_ 角色,但如果用户是[管理员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色),则认为该用户具有所需的权限级别。
-
-- **集群** - 相当于[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)。如果你选择此角色,Rancher 将检查用户是否在所有目标项目中至少具有[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)的角色。虽然用户可能没有被明确授予 _集群所有者_ 角色,但如果用户是[管理员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md),则认为该用户具有所需的权限级别。
-
-在启动应用时,Rancher 会在启动应用之前确认你在目标项目中是否拥有这些权限。
-
-:::note
-
-某些应用(如 _Grafana_ 或 _Datadog_)需要访问特定集群级别的资源。这些应用将需要 _集群_ 角色。如果你之后发现应用需要集群角色,则可以升级多集群应用以更新角色。
-
-:::
-
-## 应用配置选项
-
-对于每个 Helm Chart,你需要输入一个必须的答案列表才能成功部署 Chart。由于 Rancher 会将答案作为 `--set` 标志传递给 Helm,因此你必须按照[使用 Helm:–set 的格式和限制](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set)中的语法规则来格式化这些答案。
-
-:::note 示例
-
-当输入的答案包含用逗号分隔的两个值(即 `abc, bcd`)时,你需要用双引号将值括起来(即 ``"abc, bcd" ``)。
-
-:::
-
-### 使用 questions.yml 文件
-
-如果你部署的 Helm Chart 包含 `questions.yml` 文件,Rancher UI 会将此文件转换成易于使用的 UI 来收集问题的答案。
-
-### 原生 Helm Chart 的键值对
-
-对于原生 Helm Chart(即来自 **Helm Stable** 或 **Helm Incubator** 应用商店或自定义 Helm Chart 仓库的 Chart),答案会在 **Answers** 中以键值对的形式提供。这些答案能覆盖默认值。
-
-### 成员
-
-默认情况下,多集群应用只能由应用的创建者管理。你可以在**成员**中添加其他用户,以便这些用户管理或查看多集群应用。
-
-1. 在**成员**搜索框中键入成员的名称,查找要添加的用户。
-
-2. 为该成员选择**访问类型**。多集群项目有三种访问类型,请仔细阅读并了解这些访问类型的含义,以了解多集群应用权限的启用方式。
-
- - **所有者**:此访问类型可以管理多集群应用的任何配置,包括模板版本、[多集群应用配置选项](#多集群应用配置选项),[应用配置选项](#应用配置选项),可以与多集群应用交互的成员,以及[自定义应用配置答案](#覆盖特定项目的应用配置选项)。由于多集群应用的创建使用与用户不同的权限集,因此多集群应用的任何 _所有者_ 都可以管理/删除[目标项目](#目标)中的应用,而不需要显式授权访问这些项目。请仅为受信任的用户配置此访问类型。
-
- - **成员**:此访问类型只能修改模板版本、[应用配置选项](#应用配置选项)和[自定义应用配置答案](#覆盖特定项目的应用配置选项)。由于多集群应用的创建使用与用户不同的权限集,因此多集群应用的任何 _成员_ 都可以修改应用,而不需要显式授权访问这些项目。请仅为受信任的用户配置此访问类型。
-
- - **只读**:此访问类型不能修改多集群应用的任何配置选项。用户只能查看这些应用。
-
- :::caution
-
- 请确保仅为受信任的用户授予 _所有者_ 或 _成员_ 访问权限,因为这些用户即使无法直接访问项目,也将自动能够管理为此多集群应用创建的应用。
-
- :::
-
-### 覆盖特定项目的应用配置选项
-
-多集群应用的主要优势之一,是能够在多个集群/项目中使用相同配置部署相同的应用。在某些情况下,你可能需要为某个特定项目使用稍微不同的配置选项,但你依然希望统一管理该应用与其他匹配的应用。此时,你可以为该项目覆盖特定的[应用配置选项](#应用配置选项),而不需要创建全新的应用。
-
-1. 在**答案覆盖**中,单击**添加覆盖**。
-
-2. 对于每个覆盖,你可以选择以下内容:
-
- - **范围**:在配置选项中选择要覆盖哪些目标项目的答案。
-
- - **问题**:选择要覆盖的问题。
-
- - **答案**:输入要使用的答案。
-
-## 升级多集群应用角色和项目
-
-- **在现有的多集群应用上更改角色**
- 多集群应用的创建者和任何具有“所有者”访问类型的用户都可以升级其**角色**。添加新角色时,我们会检查用户在所有当前目标项目中是否具有该角色。Rancher 会根据 `Roles` 字段的安装部分,相应地检查用户是否具有全局管理员、集群所有者或项目所有者的角色。
-
-- **添加/删除目标项目**
-1. 多集群应用的创建者和任何具有“所有者”访问类型的用户都添加或移除目标项目。添加新项目时,我们检查此请求的调用者是否具有多集群应用中定义的所有角色。Rancher 会检查用户是否具有全局管理员、集群所有者和项目所有者的角色。
-2. 删除目标项目时,我们不会进行这些成员资格检查。这是因为调用者的权限可能与目标项目有关,或者由于该项目已被删除导致调用者希望将该项目从目标列表中删除。
-
-
-## 多集群应用管理
-
-与同一类型的多个单独应用相比,使用多集群应用的好处之一是易于管理。你可以克隆、升级或回滚多集群应用。
-
-:::note 先决条件:
-
-`Legacy` 功能开关已启用。
-
-:::
-
-1. 在左上角,单击**☰ > 多集群应用**。
-
-2. 选择要对其执行操作的多集群应用,然后单击 **⋮**。选择以下选项之一:
-
- * **克隆**:创建另一个具有相同配置的多集群应用。通过使用此选项,你可以轻松复制多集群应用。
- * **升级**:升级多集群应用以更改某些配置。在为多集群应用执行升级时,如果你有合适的[访问类型](#成员),则可以修改[升级策略](#升级)。
- * **回滚**:将你的应用回滚到特定版本。如果你的一个或多个[目标](#目标)的多集群应用在升级后出现问题,你可以使用 Rancher 存储的多达 10 个多集群应用版本进行回滚。回滚多集群应用会恢复**所有**目标集群和项目的应用,而不仅仅是受升级问题影响的目标。
-
-## 删除多集群应用
-
-:::note 先决条件:
-
-`Legacy` 功能开关已启用。
-
-:::
-
-1. 在左上角,单击**☰ > 多集群应用**。
-
-2. 选择要删除的多集群应用,然后单击**⋮ > 删除**。删除多集群应用会删除所有目标项目中的所有应用和命名空间。
-
- :::note
-
- 不能独立删除在目标项目中为多集群应用创建的应用。只有删除多集群应用后才能删除这些应用。
-
- :::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
index 277961c5dae..a8a10335f6b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
@@ -6,7 +6,7 @@ title: 为高可用 K3s Kubernetes 集群设置基础设施
我们根据 Rancher 的安装位置(K3s Kubernetes 集群、RKE Kubernetes 集群或单个 Docker 容器)为专用于 Rancher 的 Kubernetes 集群推荐不同基础设施。
-有关每个安装选项的详情,请参见[本页](../../../pages-for-subheaders/installation-and-upgrade.md)。
+有关每个安装选项的详情,请参见[本页](../../../getting-started/installation-and-upgrade/installation-and-upgrade.md)。
:::note 重要提示:
@@ -21,13 +21,13 @@ title: 为高可用 K3s Kubernetes 集群设置基础设施
- **1 个负载均衡器**:用于将流量转发到这两个节点中。
- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
-### 2. 配置外部数据库
+## 2. 配置外部数据库
K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的数据库来运行 Kubernetes。该功能让 Kubernetes 运维更加灵活。你可以根据实际情况选择合适的数据库。
@@ -39,7 +39,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
如需获取配置 K3s 集群数据库的所有可用选项,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
-### 3. 配置负载均衡器
+## 3. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -62,7 +62,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
:::
-### 4. 配置 DNS 记录
+## 4. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
index 24387c1f786..fb64922e7eb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
@@ -18,19 +18,19 @@ title: 为高可用 RKE Kubernetes 集群设置基础设施
这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
-### 为什么使用三个节点?
+## 为什么使用三个节点?
在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到三个节点中的任意一个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -53,7 +53,7 @@ title: 为高可用 RKE Kubernetes 集群设置基础设施
:::
-### 3. 配置 DNS 记录
+## 3. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
index 0af8f17b3bb..b474cf073db 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
@@ -18,13 +18,13 @@ title: 为高可用 RKE2 Kubernetes 集群设置基础设施
- **1 个负载均衡器**:用于将流量转发到这两个节点中。
- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到所有节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -47,7 +47,7 @@ title: 为高可用 RKE2 Kubernetes 集群设置基础设施
:::
-### 4. 配置 DNS 记录
+## 4. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
index 21294b1016e..bb480d6fb63 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
@@ -2,7 +2,7 @@
title: 在 Amazon EC2 中配置节点
---
-在本教程中,你将学习一种为 Rancher Mangement Server 创建 Linux 节点的方法。这些节点将满足[操作系统、Docker、硬件和网络的要求](../../../pages-for-subheaders/installation-requirements.md)。
+在本教程中,你将学习一种为 Rancher Management Server 创建 Linux 节点的方法。这些节点将满足[操作系统、Docker、硬件和网络的要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。
如果 Rancher Server 安装在 RKE Kubernetes 集群上,你需要配置三个实例。
@@ -10,12 +10,12 @@ title: 在 Amazon EC2 中配置节点
如果 Rancher Server 安装在单个 Docker 容器中,你只需要配置一个实例。
-### 1. 准备工作(可选)
+## 1. 准备工作(可选)
-- **创建 IAM 角色**:要允许 Rancher 操作 AWS 资源,例如创建新存储或新节点,你需要将 Amazon 配置为云提供商。要在 EC2 上设置云提供商,你需要进行几个操作,其中包括为 Rancher Server 节点设置 IAM 角色。有关设置云提供商的详情,请参见[本页](../../../pages-for-subheaders/set-up-cloud-providers.md)。
-- **创建安全组**:我们建议为 Rancher 节点设置一个符合 [Rancher 节点端口要求](../../../pages-for-subheaders/installation-requirements.md#端口要求)的安全组。
+- **创建 IAM 角色**:要允许 Rancher 操作 AWS 资源,例如创建新存储或新节点,你需要将 Amazon 配置为云提供商。要在 EC2 上设置云提供商,你需要进行几个操作,其中包括为 Rancher Server 节点设置 IAM 角色。有关设置云提供商的详情,请参见[本页](../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。
+- **创建安全组**:我们建议为 Rancher 节点设置一个符合 [Rancher 节点端口要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#端口要求)的安全组。
-### 2. 配置实例
+## 2. 配置实例
1. 登录到 [Amazon AWS EC2 控制台](https://console.aws.amazon.com/ec2/)。由于 Rancher Management Server 的所有基础设施都需要位于同一区域,因此,请务必记下创建 EC2 实例(Linux 节点)的**区域**。
1. 在左侧面板中,点击**实例**。
@@ -26,7 +26,7 @@ title: 在 Amazon EC2 中配置节点
1. 在**实例数量**字段中,输入实例数量。创建高可用 K3s 集群仅需要两个实例,而高可用 RKE 集群则需要三个实例。
1. 可选:如果你为 Rancher 创建了一个 IAM 角色来操作 AWS 资源,请在 **IAM 角色**字段中选择新 IAM 角色。
1. 分别点击**下一步:添加存储**,**下一步:添加标签**和**下一步:配置安全组**。
-1. 在**步骤 6:配置安全组**中,选择一个符合 Rancher 节点[端口要求](../../../pages-for-subheaders/installation-requirements.md#端口要求)的安全组。
+1. 在**步骤 6:配置安全组**中,选择一个符合 Rancher 节点[端口要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#端口要求)的安全组。
1. 点击**查看并启动**。
1. 点击**启动**。
1. 选择一个新的或现有的密钥对,用于之后连接到你的实例。如果使用现有密钥对,请确保你有访问私钥的权限。
@@ -41,7 +41,7 @@ title: 在 Amazon EC2 中配置节点
:::
-### 3. 为 RKE Kubernetes 集群节点安装 Docker 并创建用户
+## 3. 为 RKE Kubernetes 集群节点安装 Docker 并创建用户
1. 在 [AWS EC2 控制台](https://console.aws.amazon.com/ec2/)中,点击左侧面板中的**实例**。
1. 转到你想要安装 Docker 的实例。选择实例,并点击**操作 > 连接**。
@@ -67,7 +67,7 @@ sudo usermod -aG docker ubuntu
**结果**:你已配置满足操作系统、Docker、硬件和网络要求的 Rancher Server 节点。
-### RKE Kubernetes 集群节点的后续步骤
+## RKE Kubernetes 集群节点的后续步骤
如需在新节点上安装 RKE 集群,请记住每个节点的 **IPv4 公共 IP** 和 **私有 IP**。创建节点后,此信息可以在每个节点的**描述**选项卡中找到。公共和私有 IP 将用于设置 RKE 集群配置文件 `rancher-cluster.yml` 中每个节点的 `address` 和 `internal_address`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
index 2f28274e31b..52841cefbcf 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
@@ -10,9 +10,9 @@ title: 高可用安装
Rancher Server 的数据存储在 etcd 中。etcd 数据库可以在所有三个节点上运行。为了选举出大多数 etcd 节点认同的 etcd 集群 leader,节点的数量需要是奇数。如果 etcd 数据库不能选出 leader,etcd 可能会失败。这时候就需要使用备份来还原集群。
-有关 Rancher 如何工作的详情(与安装方法无关),请参见[架构](../../../pages-for-subheaders/rancher-manager-architecture.md)。
+有关 Rancher 如何工作的详情(与安装方法无关),请参见[架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)。
-### 推荐架构
+## 推荐架构
- Rancher 的 DNS 应该解析为 4 层负载均衡器。
- 负载均衡器应该把 TCP/80 端口和 TCP/443 端口的流量转发到 Kubernetes 集群的全部 3 个节点上。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
index 397fe2aa0d0..0ad61709567 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
@@ -10,7 +10,7 @@ Rancher 可以运行在任何 Kubernetes 集群上,包括托管的 Kubernetes
:::
-如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../../../pages-for-subheaders/air-gapped-helm-cli-install.md)。
+如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
:::tip 单节点安装提示:
@@ -189,5 +189,5 @@ kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed
### 后续操作
-[安装 Rancher](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)
+[安装 Rancher](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
index b1b5457ecf1..0347c6e10ba 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
@@ -14,15 +14,15 @@ title: 生产就绪集群检查清单
如需获取推荐的所有最佳实践的完整列表,请参阅[最佳实践](../../../../reference-guides/best-practices/best-practices.md)部分。
-### 节点要求
+## 节点要求
* 确保你的节点满足所有[节点要求](../node-requirements-for-rancher-managed-clusters.md),包括端口要求。
-### 备份 etcd
+## 备份 etcd
* 启用 etcd 快照。验证是否正在创建快照,并执行灾难恢复方案,从而验证快照是否有效。etcd 是存储集群状态的位置,丢失 etcd 数据意味着丢失集群。因此,请确保为集群配置 etcd 的定期快照,并确保快照也是存储在外部(节点外)的。
-### 集群架构
+## 集群架构
* 节点应具有以下角色配置之一:
* `etcd`
@@ -37,16 +37,16 @@ title: 生产就绪集群检查清单
有关每个 Kubernetes 角色的节点数的详细信息,请参阅[推荐架构](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md)部分。
-### Logging 和 Monitoring
+## Logging 和 Monitoring
* 为 Kubernetes 组件(系统服务)配置告警/通知程序。
* 为集群分析和事后剖析配置 Logging。
-### 可靠性
+## 可靠性
* 在集群上执行负载测试,以验证硬件是否可以支持你的工作负载。
-### 网络
+## 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 [Kubernetes Cloud Provider](../set-up-cloud-providers/set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
index a0ec43f76c5..6473225d264 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
@@ -53,7 +53,7 @@ title: 推荐的集群架构
参考:
-* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance)
+* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.5/faq/#what-is-failure-tolerance)
* [为 Kubernetes 操作 etcd 集群的官方 Kubernetes 文档](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/)
### Worker 节点数
@@ -62,7 +62,7 @@ title: 推荐的集群架构
### 为什么 Rancher 集群和运行应用的集群的生产要求不同
-你可能已经注意到我们的 [Kubernetes 安装](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)说明并不符合我们对生产就绪集群的要求,这是因为 `worker` 角色没有专用节点。然而,你 Rancher 中的这个三节点集群是有效的,因为:
+你可能已经注意到我们的 [Kubernetes 安装](../../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)说明并不符合我们对生产就绪集群的要求,这是因为 `worker` 角色没有专用节点。然而,你 Rancher 中的这个三节点集群是有效的,因为:
* 它允许一个 `etcd` 节点故障。
* 它通过多个 `controlplane` 节点来维护 master 组件的多个实例。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
index 1f929eb52f9..cac53d51604 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
@@ -4,7 +4,7 @@ title: Kubernetes 中节点的角色
本节介绍 Kubernetes 中 etcd 节点、controlplane 节点和 worker 节点的角色,以及这些角色如何在集群中协同工作。
-此图适用于 [Rancher 通过 RKE 部署的 Kubernetes 集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md):
+此图适用于 [Rancher 通过 RKE 部署的 Kubernetes 集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md):

线条表示组件之间的通信。而颜色纯粹用于视觉辅助。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
index 8cd13712b03..f3a682fcfae 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
@@ -13,7 +13,7 @@ Rancher 允许你通过 Rancher UI 来创建集群,从而简化了集群的创
有关 Rancher Server 配置集群的方式,以及使用什么工具来创建集群的详细信息,请参阅[产品架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)页面。
-### 不同类型集群的管理功能
+## 不同类型集群的管理功能
下表总结了每一种类型的集群和对应的可编辑的选项和设置:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md
index 3649e928567..bda5e415a7d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md
@@ -12,7 +12,7 @@ Kubernetes 不再在树内维护云提供商。vSphere 有一个树外云提供
它遵循官方 [vSphere 迁移文档](https://vsphere-csi-driver.sigs.k8s.io/features/vsphere_csi_migration.html)中提供的步骤,并提供在 Rancher 中执行的步骤。
-### Cloud-config 格式限制
+## Cloud-config 格式限制
由于 vSphere Cloud Storage Interface (CSI) 中的一个现有错误,使用以下 cloud-config 格式配置的现有卷将无法迁移。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
index 85bcf4b7b11..4d44e8df40e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
@@ -6,7 +6,7 @@ title: Rancher 管理集群的节点要求
:::note
-如果 Rancher 安装在高可用的 Kubernetes 集群上,Rancher Server 的三节点集群和下游集群有不同的要求。有关 Rancher 的安装要求,请参考[安装文档](../../../pages-for-subheaders/installation-requirements.md)中的节点要求。
+如果 Rancher 安装在高可用的 Kubernetes 集群上,Rancher Server 的三节点集群和下游集群有不同的要求。有关 Rancher 的安装要求,请参考[安装文档](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)中的节点要求。
:::
@@ -43,7 +43,7 @@ SUSE Linux 可能有一个防火墙,默认情况下会阻止所有端口。在
### Flatcar Container Linux 节点
-使用 Flatcar Container Linux 节点[通过 Rancher 启动 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 时,需要在 [Cluster Config 文件](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)中使用如下配置:
+使用 Flatcar Container Linux 节点[通过 Rancher 启动 Kubernetes](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 时,需要在 [Cluster Config 文件](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)中使用如下配置:
@@ -88,13 +88,13 @@ rancher_kubernetes_engine_config:
systemctl enable docker.service
```
-使用[主机驱动](../../../pages-for-subheaders/about-provisioning-drivers.md#主机驱动)时会自动启用 Docker 服务。
+使用[主机驱动](../authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#主机驱动)时会自动启用 Docker 服务。
### Windows 节点
运行 Windows Server 节点必须使用 Docker 企业版。
-Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集群](../../../pages-for-subheaders/use-windows-clusters.md)。
+Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集群](./use-windows-clusters/use-windows-clusters.md)。
## 硬件要求
@@ -104,13 +104,13 @@ Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集
有关大型 Kubernetes 集群的硬件建议,请参阅[构建大型集群](https://kubernetes.io/docs/setup/best-practices/cluster-large/)的官方 Kubernetes 文档。
-有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.4.0/op-guide/hardware/)。
+有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.5/op-guide/hardware/)。
## 网络要求
对于生产集群,我们建议你通过仅打开以下端口要求中定义的端口来限制流量。
-需要开放的端口根据下游集群的启动方式而有所不同。以下列出了需要为不同[集群创建选项](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)打开的端口。
+需要开放的端口根据下游集群的启动方式而有所不同。以下列出了需要为不同[集群创建选项](./kubernetes-clusters-in-rancher-setup.md)打开的端口。
有关 Kubernetes 集群中 etcd 节点、controlplane 节点和 Worker 节点的端口要求的详细信息,请参阅 [Rancher Kubernetes Engine 的端口要求](https://rancher.com/docs/rke/latest/en/os/#ports)。
@@ -126,4 +126,4 @@ Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集
如果你要配置符合 CIS(互联网安全中心)Kubernetes 基准的 Kubernetes 集群,我们建议你在安装 Kubernetes 之前按照我们的强化指南来配置节点。
-有关强化指南的更多信息,以及了解哪个指南版本对应于你的 Rancher 和 Kubernetes 版本,请参阅[安全](../../../pages-for-subheaders/rancher-security.md#rancher-强化指南)。
+有关强化指南的更多信息,以及了解哪个指南版本对应于你的 Rancher 和 Kubernetes 版本,请参阅[安全](../../../reference-guides/rancher-security/rancher-security.md#rancher-加固指南)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
index e3857d23024..3da44bb58b7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
@@ -13,7 +13,7 @@ Rancher 管理注册集群的范围取决于集群的类型。详情请参见[
已注册的 RKE Kubernetes 集群必须具有所有三个节点角色,分别是 etcd、controlplane 和 worker。只有 controlplane 组件的集群无法在 Rancher 中注册。
-有关 RKE 节点角色的更多信息,请参阅[最佳实践](../../../pages-for-subheaders/checklist-for-production-ready-clusters.md#集群架构)。
+有关 RKE 节点角色的更多信息,请参阅[最佳实践](./checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md#集群架构)。
### 权限
@@ -106,9 +106,9 @@ Rancher 管理注册集群的范围取决于集群的类型。
注册集群后,集群所有者可以:
- 通过 RBAC [管理集群访问](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)
-- 启用[Monitoring、告警和 Notifiers](../../../pages-for-subheaders/monitoring-and-alerting.md)
-- 启用 [Logging](../../../pages-for-subheaders/logging.md)
-- 启用 [Istio](../../../pages-for-subheaders/istio.md)
+- 启用[Monitoring、告警和 Notifiers](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)
+- 启用 [Logging](../../../integrations-in-rancher/logging/logging.md)
+- 启用 [Istio](../../../integrations-in-rancher/istio/istio.md)
- 管理项目和工作负载
### 已注册 RKE2 和 K3s 集群的附加功能
@@ -133,7 +133,7 @@ Rancher 处理注册的 EKS、AKS 或 GKE 集群的方式与处理在 Rancher
如果你在 Rancher 中创建 EKS、AKS 或 GKE 集群,然后将其删除,Rancher 会销毁该集群。通过 Rancher 删除已注册的集群时,Rancher Server 会_断开_与集群的连接。该集群仍然存在,只是它不再在 Rancher 中。你仍然可以像注册前一样访问已注销的集群。
-有关可用于管理已注册集群的功能,请参阅[按集群类型划分的集群管理功能](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)。
+有关可用于管理已注册集群的功能,请参阅[按集群类型划分的集群管理功能](./kubernetes-clusters-in-rancher-setup.md)。
## 配置 RKE2 和 K3s 集群升级
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
index 3c31569bf95..a00f8642776 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
@@ -21,7 +21,7 @@ weight: 1
:::
-### 1. 创建 IAM 角色并附加到实例
+## 1. 创建 IAM 角色并附加到实例
添加到集群的所有节点都必须能够与 EC2 交互,以便它们可以创建和删除资源。你可以使用附加到实例的 IAM 角色来启用交互。请参阅 [Amazon 文档:创建 IAM 角色](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) 来创建 IAM 角色。有两个示例策略:
@@ -30,7 +30,7 @@ weight: 1
在创建 [Amazon EC2 集群](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)时,你必须在创建**节点模板**时填写创建的 IAM 角色的 **IAM Instance Profile Name**(不是 ARN)。
-创建[自定义集群](../../../../pages-for-subheaders/use-existing-nodes.md)时,你必须手动将 IAM 角色附加到实例。
+创建[自定义集群](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md)时,你必须手动将 IAM 角色附加到实例。
具有 `controlplane` 角色的节点的 IAM 策略:
@@ -129,7 +129,7 @@ weight: 1
}
```
-### 2. 创建 ClusterID
+## 2. 创建 ClusterID
以下资源需要使用 `ClusterID` 进行标记:
@@ -155,6 +155,574 @@ weight: 1
**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`.
-### 使用 Amazon Elastic Container Registry (ECR)
+## 使用 Amazon Elastic Container Registry (ECR)
在将[创建 IAM 角色并附加到实例](#1-创建-iam-角色并附加到实例)中的 IAM 配置文件附加到实例时,kubelet 组件能够自动获取 ECR 凭证。使用低于 v1.15.0 的 Kubernetes 版本时,需要在集群中配置 Amazon 云提供商。从 Kubernetes 版本 v1.15.0 开始,kubelet 无需在集群中配置 Amazon 云提供商即可获取 ECR 凭证。
+
+## Using the Out-of-Tree AWS Cloud Provider
+
+
+
+
+1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for the cloud provider to find the instance correctly.
+
+2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object:
+
+```yaml
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ cloud-provider-name: aws
+```
+
+This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally:
+
+
+**Override on Etcd:**
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ kubelet-arg:
+ - cloud-provider=external
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/etcd-role
+ operator: In
+ values:
+ - 'true'
+```
+
+**Override on Control Plane:**
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ disable-cloud-controller: true
+ kube-apiserver-arg:
+ - cloud-provider=external
+ kube-controller-manager-arg:
+ - cloud-provider=external
+ kubelet-arg:
+ - cloud-provider=external
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/control-plane-role
+ operator: In
+ values:
+ - 'true'
+```
+
+**Override on Worker:**
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ kubelet-arg:
+ - cloud-provider=external
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/worker-role
+ operator: In
+ values:
+ - 'true'
+```
+
+3. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components.
+
+4. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install:
+
+```yaml
+spec:
+ rkeConfig:
+ additionalManifest: |-
+ apiVersion: helm.cattle.io/v1
+ kind: HelmChart
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ spec:
+ chart: aws-cloud-controller-manager
+ repo: https://kubernetes.github.io/cloud-provider-aws
+ targetNamespace: kube-system
+ bootstrap: true
+ valuesContent: |-
+ hostNetworking: true
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: "true"
+ args:
+ - --configure-cloud-routes=false
+ - --v=5
+ - --cloud-provider=aws
+```
+
+
+
+
+
+1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`.
+
+:::note
+
+If you use IP-based naming, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2.internal`). If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention.
+
+:::
+
+To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when the `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`:
+
+```yaml
+rancher_kubernetes_engine_config:
+ cloud_provider:
+ name: external-aws
+ useInstanceMetadataHostname: true
+```
+
+You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md), add [`--node-name`](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) to the `docker run` node registration command to set `hostname-override` — for example, `"$(hostname -f)"`. This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**.
+
+2. Select the cloud provider.
+
+Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and enables `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`.
+
+:::note
+
+You must disable `useInstanceMetadataHostname` when setting a custom node name for custom clusters via `node-name`.
+
+:::
+
+```yaml
+rancher_kubernetes_engine_config:
+ cloud_provider:
+ name: external-aws
+ useInstanceMetadataHostname: true/false
+```
+
+Existing clusters that use an **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name.
+
+3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done manually, or via [Helm charts in UI](#helm-chart-installation-from-ui).
+
+Refer to the official AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws).
+
+
+
+
+## Helm Chart Installation from CLI
+
+
+
+
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on GitHub.
+
+1. Add the Helm repository:
+
+```shell
+helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws
+helm repo update
+```
+
+2. Create a `values.yaml` file with the following contents to override the default `values.yaml`:
+
+```yaml
+# values.yaml
+hostNetworking: true
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/control-plane
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+args:
+ - --configure-cloud-routes=false
+ - --use-service-account-credentials=true
+ - --v=2
+ - --cloud-provider=aws
+clusterRoleRules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+```
+
+3. Install the Helm chart:
+
+```shell
+helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml
+```
+
+Verify that the Helm chart installed successfully:
+
+```shell
+helm status -n kube-system aws-cloud-controller-manager
+```
+
+4. (Optional) Verify that the cloud controller manager update succeeded:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+
+
+
+
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on GitHub.
+
+1. Add the Helm repository:
+
+```shell
+helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws
+helm repo update
+```
+
+2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`:
+
+```yaml
+# values.yaml
+hostNetworking: true
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/controlplane
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+args:
+ - --configure-cloud-routes=false
+ - --use-service-account-credentials=true
+ - --v=2
+ - --cloud-provider=aws
+clusterRoleRules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+```
+
+3. Install the Helm chart:
+
+```shell
+helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml
+```
+
+Verify that the Helm chart installed successfully:
+
+```shell
+helm status -n kube-system aws-cloud-controller-manager
+```
+
+4. If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`:
+
+```shell
+kubectl edit daemonset aws-cloud-controller-manager -n kube-system
+```
+
+5. (Optional) Verify that the cloud controller manager update succeeded:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+
+
+
+## Helm Chart Installation from UI
+
+
+
+
+1. Click **☰**, then select the name of the cluster from the left navigation.
+
+2. Select **Apps** > **Repositories**.
+
+3. Click the **Create** button.
+
+4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field.
+
+5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**.
+
+6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**.
+
+7. Add the following container arguments:
+
+```yaml
+ - '--use-service-account-credentials=true'
+ - '--configure-cloud-routes=false'
+```
+
+8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup.
+
+```yaml
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+```
+
+9. Rancher-provisioned RKE2 nodes are tainted `node-role.kubernetes.io/control-plane`. Update tolerations and the nodeSelector:
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/control-plane
+
+```
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+```
+
+:::note
+
+There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the Daemonset manually to set the `nodeSelector`:
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+```
+
+:::
+
+10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6).
+
+
+
+
+
+1. Click **☰**, then select the name of the cluster from the left navigation.
+
+2. Select **Apps** > **Repositories**.
+
+3. Click the **Create** button.
+
+4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field.
+
+5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**.
+
+6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**.
+
+7. Add the following container arguments:
+
+```yaml
+ - '--use-service-account-credentials=true'
+ - '--configure-cloud-routes=false'
+```
+
+8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup:
+
+```yaml
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+```
+
+9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector:
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/controlplane
+
+```
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+```
+
+:::note
+
+There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then edit the Daemonset manually to set the `nodeSelector`:
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+```
+
+:::
+
+10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+
+
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
index 065ff2deb84..cda5dbafed4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
@@ -8,7 +8,7 @@ Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外
本文遵循官方 [vSphere 迁移文档](https://vsphere-csi-driver.sigs.k8s.io/features/vsphere_csi_migration.html)中提供的步骤,并介绍了要在 Rancher 中执行的步骤。
-### Cloud-config 格式限制
+## Cloud-config 格式限制
由于 vSphere CSI 中的现有错误,使用以下 cloud-config 格式配置的现有卷将不会迁移。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md
index 7508802ff08..2dff10dc67c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md
@@ -21,23 +21,23 @@ _cloud provider_ 是 Kubernetes 中的一个模块,它提供了一个用于管
* GCE (Google Compute Engine)
* vSphere
-### 设置 Amazon 云提供商
+## 设置 Amazon 云提供商
有关启用 Amazon 云提供商的详细信息,请参阅[此页面](amazon.md)。
-### 设置 Azure 云提供商
+## 设置 Azure 云提供商
有关启用 Azure 云提供商的详细信息,请参阅[此页面](azure.md)。
-### 设置 GCE 云提供商
+## 设置 GCE 云提供商
有关启用 Google Compute Engine 云提供商的详细信息,请参阅[此页面](google-compute-engine.md)。
-### 设置 vSphere 云提供商
+## 设置 vSphere 云提供商
有关启用 vSphere 云提供商的详细信息,请参阅[树内 vSphere 配置](configure-in-tree-vsphere.md) 和 [树外 vSphere 配置](configure-out-of-tree-vsphere.md)。
-### 设置自定义云提供商
+## 设置自定义云提供商
如果您想配置其他 Kubernetes 云提供商,则可使用 `自定义` 云提供商。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md
index 4369666a95d..d3a15f44ea7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md
@@ -146,7 +146,7 @@ Rancher 可以通过以下两种方式之一连接到私有 AKS 集群。
AKS 配置者可以在 Rancher 和提供商之间同步 AKS 集群的状态。有关其工作原理的技术说明,请参阅[同步](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md)。
-有关配置刷新间隔的信息,请参阅[本节](../../../../pages-for-subheaders/gke-cluster-configuration.md#配置刷新间隔)。
+有关配置刷新间隔的信息,请参阅[本节](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md#配置刷新间隔)。
## 以编程方式创建 AKS 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md
index ee3f3397dc6..f0168f45262 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md
@@ -59,7 +59,7 @@ title: 创建 GKE 集群
1. 可选:使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 可选:将 Kubernetes [标签](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)或[注释](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)添加到集群。
1. 输入你的 Google 项目 ID 和 Google 云凭证。
-1. 完成表单的其余部分。如需帮助,请参阅 [GKE 集群配置参考](../../../../pages-for-subheaders/gke-cluster-configuration.md)。
+1. 完成表单的其余部分。如需帮助,请参阅 [GKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)。
1. 单击**创建**。
**结果**:你已成功部署 GKE 集群。
@@ -79,7 +79,7 @@ title: 创建 GKE 集群
## 配置参考
-有关在 Rancher 中配置 GKE 集群的详细信息,请参阅[此页面](../../../../pages-for-subheaders/gke-cluster-configuration.md)。
+有关在 Rancher 中配置 GKE 集群的详细信息,请参阅[此页面](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)。
## 更新 Kubernetes 版本
集群的 Kubernetes 版本可以升级到 GKE 集群所在区域或地区中可用的任何版本。升级 Kubernetes 主版本不会自动升级 Worker 节点。节点可以独立升级。
@@ -94,7 +94,7 @@ GKE 在 1.19+ 中取消了基本身份验证。要将集群升级到 1.19+,必
GKE 配置者可以在 Rancher 和提供商之间同步 GKE 集群的状态。有关其工作原理的技术说明,请参阅[同步](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md)。
-有关配置刷新间隔的信息,请参阅[本节](../../../../pages-for-subheaders/gke-cluster-configuration.md#配置刷新间隔)。
+有关配置刷新间隔的信息,请参阅[本节](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md#配置刷新间隔)。
## 以编程方式创建 GKE 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md
index 3a5ac26045c..bfe8fd56a59 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md
@@ -4,7 +4,7 @@ title: Host Gateway (L2bridge) 的网络要求
本节介绍如何配置使用 *Host Gateway (L2bridge)* 模式的自定义 Windows 集群。
-### 禁用私有 IP 地址检查
+## 禁用私有 IP 地址检查
如果你使用 *Host Gateway (L2bridge)* 模式,并将节点托管在下面列出的云服务上,则必须在启动时禁用 Linux 或 Windows 主机的私有 IP 地址检查。要为每个节点禁用此检查,请按照以下各个云服务对应的说明进行操作:
@@ -14,7 +14,7 @@ title: Host Gateway (L2bridge) 的网络要求
| Google GCE | [为实例启用 IP 转发](https://cloud.google.com/vpc/docs/using-routes#canipforward)(默认情况下,VM 无法转发由另一个 VM 发起的数据包) |
| Azure VM | [启用或禁用 IP 转发](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) |
-### 云托管虚拟机的路由配置
+## 云托管虚拟机的路由配置
如果是使用 Flannel 的 [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) 后端,同一节点上的所有容器都属于私有子网,流量通过主机网络从一个节点上的子网路由到在另一个节点上的子网。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md
index 51662df0829..d48f0484231 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md
@@ -21,7 +21,7 @@ Windows 集群的其他要求如下:
有关支持 Windows 的 Kubernetes 功能摘要,请参阅[在 Windows 中使用 Kubernetes 支持的功能和限制](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations)的 Kubernetes 文档,或[在 Kubernetes 中调度 Windows 容器的指南](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/)。
-### RKE2 Windows
+## RKE2 Windows
RKE2 配置功能还包括在 Windows 集群上安装 RKE2。RKE2 的 Windows 功能包括:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md
index 1c4c314708d..ccef1846703 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md
@@ -11,7 +11,7 @@ description: 了解如何添加 SSL 证书或 TLS 证书
:::
-### 1. 创建一个密文
+## 1. 创建一个密文
1. 在左上角,单击 **☰ > 集群管理**。
@@ -23,7 +23,7 @@ description: 了解如何添加 SSL 证书或 TLS 证书
1. 在**证书**字段中,将你的证书复制并粘贴到文本框中(包括标头和页脚),或者单击**从文件读取**选择文件系统上的证书文件。如果可能,我们建议使用**从文件读取**以减少出错的可能性。请注意,证书文件的扩展名是 `.crt`。
1. 单击**创建**。
-### 2. 将密文添加到 Ingress
+## 2. 将密文添加到 Ingress
1. 在左上角,单击 **☰ > 集群管理**。
1. 转到要部署 Ingress 的集群,然后单击**服务发现 > Ingress**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md
index 4dd86bb2a6c..8b80ea3272d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md
@@ -30,10 +30,10 @@ title: Kubernetes 资源
Rancher 支持两种类型的负载均衡器:
-- [Layer-4 负载均衡器](load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer#四层负载均衡器)
-- [Layer-7 负载均衡器](load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#七层负载均衡器)
+- [Layer-4 负载均衡器](./load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#四层负载均衡器)
+- [Layer-7 负载均衡器](./load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#七层负载均衡器)
-有关详细信息,请参阅[负载均衡器](load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md)。
+有关详细信息,请参阅[负载均衡器](./load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md)。
#### Ingress
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md
index 16ae92f17ee..036f7a617c0 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md
@@ -17,7 +17,7 @@ description: 阅读此步骤指南以部署工作负载。部署工作负载以
1. 选择现有命名空间,或单击**添加到新命名空间**并输入新命名空间。
-1. 点击**添加端口**进入端口映射,这让你可以访问集群内外的应用程序。如需更多信息,请参阅 [Service](../../../../pages-for-subheaders/workloads-and-pods.md#services)。
+1. 点击**添加端口**进入端口映射,这让你可以访问集群内外的应用程序。如需更多信息,请参阅 [Service](./workloads-and-pods.md#services)。
1. 配置其余选项:
@@ -41,7 +41,7 @@ description: 阅读此步骤指南以部署工作负载。部署工作负载以
- 在 [AWS](https://aws.amazon.com/) 中,节点必须位于同一可用区中并具有附加/分离卷的 IAM 权限。
- - 集群必须使用 [AWS 云提供商](https://github.com/kubernetes/website/blob/release-1.18/content/en/docs/concepts/cluster-administration/cloud-providers.md#aws)选项。有关启用此选项的更多信息,请参阅[创建 AWS EC2 集群](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)或[创建自定义集群](../../../../pages-for-subheaders/use-existing-nodes.md)。
+ - 集群必须使用 [AWS 云提供商](https://github.com/kubernetes/website/blob/release-1.18/content/en/docs/concepts/cluster-administration/cloud-providers.md#aws)选项。有关启用此选项的更多信息,请参阅[创建 AWS EC2 集群](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)或[创建自定义集群](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md
index 08be0859bb8..19ba2245972 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md
@@ -9,16 +9,16 @@ description: "了解在 Kubernetes 中构建复杂容器化应用程序的两种
你可以使用两种基本结构(pod 和工作负载)在 Kubernetes 中构建复杂的容器化应用程序。构建应用程序后,你可以使用第三种结构(service)在集群中或互联网上公开应用程序。
-### Pod
+## Pod
[_Pod_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) 是一个或多个共享网络命名空间和存储卷的容器。大多数 pod 只有一个容器。因此,我们讨论的 _pod_ 通常等同于 _容器_。扩展 pod 的方式与扩展容器的方式相同,即配置实现服务的同一 pod 的多个实例。通常,Pod 会根据工作负载进行扩展和管理。
-### 工作负载
+## 工作负载
_工作负载_ 是为 pod 设置部署规则的对象。Kubernetes 基于这些规则执行部署,并根据应用程序的当前状态来更新工作负载。
工作负载让你可以定义应用程序调度、扩展和升级的规则。
-#### 工作负载类型
+### 工作负载类型
Kubernetes 将工作负载分为不同的类型。Kubernetes 支持的最流行的类型是:
@@ -42,7 +42,7 @@ Kubernetes 将工作负载分为不同的类型。Kubernetes 支持的最流行
_CronJobs_ 与 Job 类似。但是,CronJob 会基于 cron 的计划运行到完成状态。
-### Services
+## Services
在许多用例中,工作负载必须:
@@ -51,7 +51,7 @@ Kubernetes 将工作负载分为不同的类型。Kubernetes 支持的最流行
你可以通过创建一个 _Service_ 实现这些目的。Service 使用[选择器/标签(查看代码示例)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller)来映射到底层工作负载的 pod。Rancher UI 使用你选择的服务端口和类型来自动创建 service 以及工作负载,从而简化此映射过程。
-#### Service 类型
+### Service 类型
Rancher 中有几种可用的 Service 类型。以下描述来自 [Kubernetes 文档](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md
index 15263fff292..e01f1b6fc24 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md
@@ -7,17 +7,17 @@ Rancher 管理的集群上部署了两种不同的 Agent 资源:
- [cattle-cluster-agent](#cattle-cluster-agent)
- [cattle-node-agent](#cattle-node-agent)
-有关 Rancher Server 如何配置集群并与集群通信的概述,请参阅[产品架构](../../../pages-for-subheaders/rancher-manager-architecture.md)。
+有关 Rancher Server 如何配置集群并与集群通信的概述,请参阅[产品架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)。
-### cattle-cluster-agent
+## cattle-cluster-agent
-`cattle-cluster-agent` 用于连接 [Rancher 启动的 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。`cattle-cluster-agent` 使用 Deployment 资源进行部署。
+`cattle-cluster-agent` 用于连接 [Rancher 启动的 Kubernetes](./launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。`cattle-cluster-agent` 使用 Deployment 资源进行部署。
-### cattle-node-agent
+## cattle-node-agent
-`cattle-node-agent` 用于在执行集群操作时与 [Rancher 启动的 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群中的节点进行交互。集群操作包括升级 Kubernetes 版本和创建/恢复 etcd 快照。`cattle-node-agent` 使用 DaemonSet 资源进行部署,以确保能在每个节点上运行。当 `cattle-cluster-agent` 不可用时,`cattle-node-agent` 可以作为备选方案,用来连接 [Rancher 启动的 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。
+`cattle-node-agent` 用于在执行集群操作时与 [Rancher 启动的 Kubernetes](./launch-kubernetes-with-rancher.md) 集群中的节点进行交互。集群操作包括升级 Kubernetes 版本和创建/恢复 etcd 快照。`cattle-node-agent` 使用 DaemonSet 资源进行部署,以确保能在每个节点上运行。当 `cattle-cluster-agent` 不可用时,`cattle-node-agent` 可以作为备选方案,用来连接 [Rancher 启动的 Kubernetes](./launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。
-### 调度规则
+## 调度规则
`cattle-cluster-agent` 使用一组固定的容忍度,或基于应用于 control plane 节点的污点动态添加的容忍度。这种结构允许[基于污点进行驱逐](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions)为 `cattle-cluster-agent` 正常工作。
@@ -28,7 +28,7 @@ Rancher 管理的集群上部署了两种不同的 Agent 资源:
| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | **注意**:这些是默认容忍度,并将替换为与 controlplane 节点的污点匹配的容忍度。 `effect:NoSchedule` `key:node-role.kubernetes.io/controlplane` `value:true` `effect:NoSchedule` `key:node-role.kubernetes.io/control-plane` `operator:Exists` `effect:NoSchedule` `key:node-role.kubernetes.io/master` `operator:Exists` |
| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` |
-`cattle-cluster-agent` Deployment 使用 `preferredDuringSchedulingIgnoredDuringExecution` 的首选调度规则,倾向于在具有 `controlplane` 节点的节点上进行调度。当集群中没有可见的 controlplane 节点时(通常是使用[提供商托管的 Kubernetes 的集群](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)),你可以在节点上添加 `cattle.io/cluster-agent=true` 标签,从而优先将 `cattle-cluster-agent` pod 调度到该节点。
+`cattle-cluster-agent` Deployment 使用 `preferredDuringSchedulingIgnoredDuringExecution` 的首选调度规则,倾向于在具有 `controlplane` 节点的节点上进行调度。当集群中没有可见的 controlplane 节点时(通常是使用[提供商托管的 Kubernetes 的集群](../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)),你可以在节点上添加 `cattle.io/cluster-agent=true` 标签,从而优先将 `cattle-cluster-agent` pod 调度到该节点。
有关调度规则的更多信息,请参阅 [Kubernetes:将 Pod 分配给节点](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md
index 3941f3b37c3..3ea34605985 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md
@@ -6,11 +6,11 @@ RKE2,也称为 RKE Government,是一个完全符合标准的 Kubernetes 发
RKE1 和 RKE2 有一些细微的差异,本文将重点介绍这些差异。
-### controlplane 组件
+## controlplane 组件
RKE1 使用 Docker 来部署和管理 controlplane 组件,还使用 Docker 作为 Kubernetes 的容器运行时。相比之下,RKE2 将 controlplane 组件作为由 kubelet 管理的静态 pod 启动。RKE2 的容器运行时是 Containerd,它允许 Mirror 容器镜像仓库等内容。使用 Docker 的 RKE1 不允许 Mirror。
-### Cluster API
+## Cluster API
RKE2/K3s 配置是基于 Cluster API (CAPI) 上游框架之上构建的,这导致 RKE2 配置的集群的行为通常与 RKE1 配置的集群不同。
@@ -22,10 +22,6 @@ RKE2/K3s 配置是基于 Cluster API (CAPI) 上游框架之上构建的,这导
如果你是习惯于 RKE1 配置的用户,请注意新的 RKE2 行为。
-### 名词解释
+## 名词解释
从 RKE1 到 RKE2,某些术语已更改或已不再使用。例如,在 RKE1中,你使用**节点模板**,而在 RKE2 中,你可以在创建或编辑集群时配置集群节点池。另一个例子是 RKE1 中的**节点池(node pool)** 现在在 RKE2 中称为**主机池(machine pool)**。
-
-
-
-
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md
index 39f8b418151..656d9bd74a6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md
@@ -28,7 +28,7 @@ title: 创建 DigitalOcean 集群
### 2. 使用云凭证创建节点模板
-为 DigitalOcean 创建[节点模板](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板) 会允许 Rancher 在 DigitalOcean 中配置新节点。其他集群可以复用节点模板。
+为 DigitalOcean 创建[节点模板](./use-new-nodes-in-an-infra-provider.md#节点模板) 会允许 Rancher 在 DigitalOcean 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -42,7 +42,7 @@ title: 创建 DigitalOcean 集群
1. 在**集群**页面上,单击**创建**。
1. 单击 **DigitalOcean**。
1. 输入**集群名称**。
-1. 将一个或多个节点池添加到你的集群。将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 在**集群配置**中,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 单击**创建**。
@@ -71,7 +71,7 @@ title: 创建 DigitalOcean 集群
1. 单击 **DigitalOcean**。
1. 选择一个**云凭证**。如果存在多个则需要选择。否则,它是预选的。
1. 输入**集群名称**。
-1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
+1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](./use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
1. 为每个主机池定义主机配置。有关配置选项的信息,请参阅 [DigitalOcean 主机配置参考](../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md)。
1. 使用**集群配置**,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。有关配置集群的帮助,请参阅 [RKE2 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md
index b2918cf475a..c7fe813394f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md
@@ -9,12 +9,12 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
然后,在 Rancher 中创建一个 EC2 集群,并在配置新集群时为集群定义节点池。每个节点池都有一个 etcd、controlplane 或 worker 的 Kubernetes 角色。Rancher 会在新节点上安装 RKE Kubernetes,并为每个节点设置节点池定义的 Kubernetes 角色。
-### 先决条件
+## 先决条件
- **AWS EC2 访问密钥和密文密钥**,用于创建实例。请参阅 [Amazon 文档:创建访问密钥](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey)来创建访问密钥和密文密钥。
- **已创建 IAM 策略**,用于为用户添加的访问密钥和密文密钥。请参阅 [Amazon 文档:创建 IAM 策略(控制台)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start)来创建 IAM 策略。参阅下面的三个示例 JSON 策略:
- [IAM 策略示例](#iam-策略示例)
- - [带有 PassRole 的 IAM 策略示例](#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
+ - [带有 PassRole 的 IAM 策略示例](#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
- [允许加密 EBS 卷的 IAM 策略示例](#允许加密-ebs-卷的-iam-策略示例)
- 为用户添加 **IAM 策略权限**。请参阅 [Amazon 文档:为用户添加权限(控制台)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console),来将权限添加给用户。
@@ -44,7 +44,7 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
### 2. 使用云凭证和 EC2 的信息来创建节点模板
-为 EC2 创建[节点模板](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 EC2 中配置新节点。其他集群可以复用节点模板。
+为 EC2 创建[节点模板](./use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 EC2 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -60,14 +60,14 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
### 3. 使用节点模板创建具有节点池的集群
-将一个或多个节点池添加到你的集群。有关节点池的更多信息,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+将一个或多个节点池添加到你的集群。有关节点池的更多信息,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 点击 **☰ > 集群管理**。
1. 在**集群**页面上,单击**创建**。
1. 单击 **Amazon EC2**。
-1. 为每个 Kubernetes 角色创建一个节点池。为每个节点池选择你已创建的节点模板。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+1. 为每个 Kubernetes 角色创建一个节点池。为每个节点池选择你已创建的节点模板。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
-1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。参见[选择云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md)来配置 Kubernetes 云提供商。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
+1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。参见[选择云提供商](../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)来配置 Kubernetes 云提供商。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
:::note
@@ -103,7 +103,7 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
1. 单击 **Amazon EC2**。
1. 选择一个**云凭证**。如果存在多个则需要选择。否则,它是预选的。
1. 输入**集群名称**。
-1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
+1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](./use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
1. 为每个主机池定义主机配置。有关配置选项的信息,请参阅 [EC2 主机配置参考](../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md)。
1. 使用**集群配置**,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。有关配置集群的帮助,请参阅 [RKE2 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md
index a3af5bca578..e9e2c6fbfb8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md
@@ -65,7 +65,7 @@ az ad sp create-for-rbac \
### 2. 使用云凭证创建节点模板
-为 Azure 创建[节点模板](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Azure 中配置新节点。其他集群可以复用节点模板。
+为 Azure 创建[节点模板](./use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Azure 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -81,7 +81,7 @@ az ad sp create-for-rbac \
1. 在**集群**页面上,单击**创建**。
1. 单击 **Azure**。
1. 输入**集群名称**。
-1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池及其最佳实践的更多信息,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池及其最佳实践的更多信息,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 在**集群配置**中,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 单击**创建**。
@@ -112,7 +112,7 @@ az ad sp create-for-rbac \
1. 单击 **Azure**。
1. 选择一个**云凭证**。如果存在多个则需要选择。否则,它是预选的。
1. 输入**集群名称**。
-1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
+1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](./use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
1. 为每个主机池定义主机配置。有关配置选项的信息,请参阅 [Azure 主机配置参考](../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md)。
1. 使用**集群配置**,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。有关配置集群的帮助,请参阅 [RKE2 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md
index 9eb5ce53af6..c32a765cb3c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md
@@ -13,9 +13,9 @@ Rancher 可以在 AOS (AHV) 中配置节点并在其上安装 Kubernetes。在 A
Nutanix 集群可能由多组具有不同属性(例如内存或 vCPU 数量)的 VM 组成。这种分组允许对每个 Kubernetes 角色的节点大小进行细粒度控制。
-- [创建 Nutanix 集群](provision-kubernetes-clusters-in-aos.md#creating-a-nutanix-aos-cluster)
-- [配置存储](provision-kubernetes-clusters-in-aos.md)
+- [创建 Nutanix 集群](./provision-kubernetes-clusters-in-aos.md#1-创建节点模板)
+- [配置存储](./provision-kubernetes-clusters-in-aos.md)
## 创建 Nutanix 集群
-在[本节](provision-kubernetes-clusters-in-aos.md)中,你将学习如何使用 Rancher 在 Nutanix AOS 中安装 [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes 集群。
\ No newline at end of file
+在[本节](./provision-kubernetes-clusters-in-aos.md)中,你将学习如何使用 Rancher 在 Nutanix AOS 中安装 [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes 集群。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md
index c360364aa91..1ce64df1fe9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md
@@ -51,7 +51,7 @@ title: 在 Nutanix AOS 中配置 Kubernetes 集群
### 1. 创建节点模板
-为 Nutanix AOS 创建[节点模板](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Nutanix AOS 中配置新节点。其他集群可以复用节点模板。
+为 Nutanix AOS 创建[节点模板](../use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Nutanix AOS 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -71,7 +71,7 @@ title: 在 Nutanix AOS 中配置 Kubernetes 集群
1. 输入**集群名称**,然后点击**继续**。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
-1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点池)。
1. 检查并确认你的选项。然后单击**创建**。
**结果**:集群已创建,并处于 **Provisioning** 状态。Rancher 已在你的集群中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md
index 806090bff7c..6defefed5a6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md
@@ -65,7 +65,7 @@ title: 在 vSphere 中配置 Kubernetes 集群
### 2. 使用云凭证创建节点模板
-为 vSphere 创建[节点模板](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 vSphere 中配置新节点。其他集群可以复用节点模板。
+为 vSphere 创建[节点模板](../use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 vSphere 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -86,7 +86,7 @@ title: 在 vSphere 中配置 Kubernetes 集群
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
1. 如果你想稍后动态配置持久存储或其他基础设施,你需要修改集群 YAML 文件来启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
-1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../use-new-nodes-in-an-infra-provider.md#节点池)。
1. 检查并确认你的选项。然后单击**创建**。
**结果**:
@@ -107,4 +107,4 @@ title: 在 vSphere 中配置 Kubernetes 集群
- **通过 kubectl CLI 访问你的集群**:按照[这些步骤](../../../../new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#在工作站使用-kubectl-访问集群)在你的工作站上使用 kubectl 访问集群。在这种情况下,你将通过 Rancher Server 的身份验证代理进行身份验证,然后 Rancher 会让你连接到下游集群。此方法允许你在没有 Rancher UI 的情况下管理集群。
- **通过 kubectl CLI 使用授权的集群端点访问你的集群**:按照[这些步骤](../../../../new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)直接使用 kubectl 访问集群,而无需通过 Rancher 进行身份验证。我们建议设置此替代方法来访问集群,以便在无法连接到 Rancher 时访问集群。
-- **配置存储**:有关如何使用 Rancher 在 vSphere 中配置存储的示例,请参阅[本节](../../../../../pages-for-subheaders/provisioning-storage-examples.md)。要在 vSphere 中动态配置存储,你必须启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
+- **配置存储**:有关如何使用 Rancher 在 vSphere 中配置存储的示例,请参阅[本节](../../../manage-clusters/provisioning-storage-examples/provisioning-storage-examples.md)。要在 vSphere 中动态配置存储,你必须启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md
index aef1f3c08b4..88944e339fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md
@@ -31,7 +31,7 @@ title: 将用户添加到集群
如果配置了外部身份验证:
- - 在你键入时,Rancher 会从你的[外部身份验证](../../../../pages-for-subheaders/authentication-config.md)源返回用户。
+ - 在你键入时,Rancher 会从你的[外部身份验证](../../authentication-permissions-and-global-configuration/authentication-config/authentication-config.md)源返回用户。
:::note 使用 AD 但找不到你的用户?
@@ -43,7 +43,7 @@ title: 将用户添加到集群
:::note
- 如果你以本地用户身份登录,外部用户不会显示在你的搜索结果中。有关详细信息,请参阅[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+ 如果你以本地用户身份登录,外部用户不会显示在你的搜索结果中。有关详细信息,请参阅[外部身份验证配置和主体用户](../../authentication-permissions-and-global-configuration/authentication-config/authentication-config.md#外部认证配置和用户主体)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
index 364a4012eee..587d3b1fc3b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
@@ -4,7 +4,7 @@ title: 授权集群端点的工作原理
本文介绍 kubectl CLI、kubeconfig 文件和授权集群端点如何协同工作,使你可以直接访问下游 Kubernetes 集群,而无需通过 Rancher Server 进行身份验证。本文旨在为[设置 kubectl 以直接访问集群的说明](use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)提供背景信息和上下文。
-### Kubeconfig 文件说明
+## Kubeconfig 文件说明
kubeconfig 文件是与 kubectl 命令行工具(或其他客户端)结合使用时用于配置 Kubernetes 访问的文件。
@@ -19,11 +19,11 @@ kubeconfig 文件及其内容特定于各个集群。你可以从 Rancher 的**
下载 kubeconfig 文件后,你将能够使用 kubeconfig 文件及其 Kubernetes [上下文](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration)访问下游集群。
-如果管理员[关闭了 kubeconfig 令牌生成](../../../../reference-guides/about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](./authorized-cluster-endpoint.md) 存在于你的 PATH 中。
+如果管理员[关闭了 kubeconfig 令牌生成](../../../../api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](../../../../reference-guides/cli-with-rancher/rancher-cli.md) 存在于你的 PATH 中。
-### RKE 集群的两种身份验证方法
+## RKE 集群的两种身份验证方法
-如果集群不是 [RKE 集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md),kubeconfig 文件只允许你以一种方式访问集群,即通过 Rancher Server 进行身份验证,然后 Rancher 允许你在集群上运行 kubectl 命令。
+如果集群不是 [RKE 集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md),kubeconfig 文件只允许你以一种方式访问集群,即通过 Rancher Server 进行身份验证,然后 Rancher 允许你在集群上运行 kubectl 命令。
对于 RKE 集群,kubeconfig 文件允许你通过两种方式进行身份验证:
@@ -36,7 +36,7 @@ kubeconfig 文件及其内容特定于各个集群。你可以从 Rancher 的**
[架构介绍](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md)也详细解释了这些与下游 Kubernetes 集群通信的方法,并介绍了 Rancher 的工作原理以及 Rancher 如何与下游集群通信的详细信息。
-### 关于 kube-api-auth 身份验证 Webhook
+## 关于 kube-api-auth 身份验证 Webhook
`kube-api-auth` 微服务是为[授权集群端点](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点)提供用户认证功能而部署的。当你使用 `kubectl` 访问下游集群时,集群的 Kubernetes API server 会使用 `kube-api-auth` 服务作为 webhook 对你进行身份验证。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md
index ac6a4a338fb..4532ddf2e7b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md
@@ -7,7 +7,7 @@ description: "了解如何通过 kubectl Shell 使用 kubectl,或通过 kubect
有关使用 kubectl 的更多信息,请参阅 [Kubernetes 文档:kubectl 概述](https://kubernetes.io/docs/reference/kubectl/overview/)。
-### 在 Rancher UI 中使用 kubectl shell 访问集群
+## 在 Rancher UI 中使用 kubectl shell 访问集群
你可以通过登录 Rancher 并在 UI 中打开 kubectl shell 来访问和管理你的集群。你无需进一步配置。
@@ -15,7 +15,7 @@ description: "了解如何通过 kubectl Shell 使用 kubectl,或通过 kubect
1. 转到要使用 kubectl 访问的集群,然后单击 **Explore**。
1. 在顶部导航菜单中,单击 **Kubectl Shell** 按钮。使用打开的窗口与你的 Kubernetes 集群进行交互。
-### 在工作站使用 kubectl 访问集群
+## 在工作站使用 kubectl 访问集群
本节介绍如何下载集群的 kubeconfig 文件、从工作站启动 kubectl 以及访问下游集群。
@@ -38,13 +38,13 @@ kubectl --kubeconfig /custom/path/kube.config get pods
1. 从工作站启动 kubectl。使用它与 Kubernetes 集群进行交互。
-### 使用 kubectl 创建的资源的注意事项
+## 使用 kubectl 创建的资源的注意事项
Rancher 会发现并显示由 `kubectl` 创建的资源。但是在发现资源的时候,这些资源可能没有包括所有必须的注释。如果资源已经使用 Rancher UI/API 进行操作(例如,扩展工作负载),但是由于缺少注释,资源的重新创建可能会触发。只有在首次对发现的资源进行操作时,这种情况才会发生。
## 直接使用下游集群进行身份验证
-本节旨在帮助你设置访问 [RKE 集群的替代方法](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+本节旨在帮助你设置访问 [RKE 集群的替代方法](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
此方法仅适用于启用了[授权集群端点](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点)的 RKE、RKE2 和 K3s集群。在 Rancher 创建集群时,Rancher 会生成一个 kubeconfig 文件,其中包含用于访问集群的额外 kubectl 上下文。该上下文允许你使用 kubectl 通过下游集群进行身份验证,而无需通过 Rancher 进行身份验证。有关授权集群端点如何工作的详细说明,请参阅[此页面](authorized-cluster-endpoint.md)。
@@ -78,7 +78,7 @@ CURRENT NAME CLUSTER AUTHINFO N
当 `kubectl` 正常工作时,它确认你可以绕过 Rancher 的身份验证代理访问集群。
-### 直接连接到定义了 FQDN 的集群
+## 直接连接到定义了 FQDN 的集群
如果集群定义了 FQDN,将会创建一个引用 FQDN 的上下文。上下文将命名为 `-fqdn`。当你想在没有 Rancher 的情况下使用 `kubectl` 访问这个集群时,你需要使用这个上下文。
@@ -92,7 +92,7 @@ kubectl --context -fqdn get nodes
kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods
```
-### 直接连接到未定义 FQDN 的集群
+## 直接连接到未定义 FQDN 的集群
如果集群没有定义 FQDN,则会创建额外的上下文来引用 controlplane 中每个节点的 IP 地址。每个上下文将被命名为 `-`。当你想在没有 Rancher 的情况下使用 `kubectl` 访问这个集群时,你需要使用这个上下文。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md
index d881d6c968e..ea35bd9fc33 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md
@@ -4,7 +4,7 @@ title: 添加 Pod 安全策略
:::note 先决条件:
-以下选项仅适用于[使用 RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+以下选项仅适用于[使用 RKE 启动的集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
@@ -18,7 +18,7 @@ title: 添加 Pod 安全策略
:::note
- 此选项仅适用于[由 RKE 配置的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+ 此选项仅适用于[由 RKE 配置的集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
index 4ed2eb4d44d..f5095790dfc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
@@ -23,9 +23,9 @@ description: 了解从 Rancher 启动的 Kubernetes 集群中删除节点时的
| 在 `management.cattle.io` API Group 下创建的所有资源 | ✓ | ✓ | ✓ | |
| Rancher v2.x 创建的所有 CRD | ✓ | ✓ | ✓ | |
-[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md
-[2]: ../../../pages-for-subheaders/use-existing-nodes.md
-[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md
+[1]: ../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md
+[2]: ../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
+[3]: ../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md
[4]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
## 通过 Rancher UI 删除集群中的节点
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md
index 31c7c4ca4fd..4c4f0aaee77 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md
@@ -11,7 +11,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
本文假设你已了解 Kubernetes 的持久卷、持久卷声明和存储类的概念。如需更多信息,请参阅[存储的工作原理](manage-persistent-storage/about-persistent-storage.md)部分。
-### 先决条件
+## 先决条件
设置持久存储需要`管理卷`的[角色](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色参考)。
@@ -21,7 +21,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
如果要将现有的持久存储连接到集群,则不需要启用云提供商。
-### 设置现有存储
+## 设置现有存储
设置现有存储的总体流程如下:
@@ -32,7 +32,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
有关详细信息和先决条件,请参阅[此页面](manage-persistent-storage/set-up-existing-storage.md)。
-### 在 Rancher 中动态配置新存储
+## 在 Rancher 中动态配置新存储
配置新存储的总体流程如下:
@@ -42,7 +42,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
有关详细信息和先决条件,请参阅[此页面](manage-persistent-storage/dynamically-provision-new-storage.md)。
-### Longhorn 存储
+## Longhorn 存储
[Longhorn](https://longhorn.io/) 是一个轻量级、可靠、易用的 Kubernetes 分布式块存储系统。
@@ -52,28 +52,28 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现
Rancher v2.5 简化了在 Rancher 管理的集群上安装 Longhorn 的过程。详情请参见[本页面](../../../../integrations-in-rancher/longhorn/longhorn.md)。
-### 配置存储示例
+## 配置存储示例
我们提供了如何使用 [NFS](../provisioning-storage-examples/nfs-storage.md), [vSphere](../provisioning-storage-examples/vsphere-storage.md),和 [Amazon 的 EBS](../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) 来配置存储的示例。
-### GlusterFS 卷
+## GlusterFS 卷
在将数据存储在 GlusterFS 卷上的集群中,你可能会遇到重启 `kubelet` 后 pod 无法挂载卷的问题。有关避免此情况发生的详细信息,请参阅[此页面](manage-persistent-storage/about-glusterfs-volumes.md)。
-### iSCSI 卷
+## iSCSI 卷
在将数据存储在 iSCSI 卷上的 [Rancher 启动的 Kubernetes 集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中,你可能会遇到 kubelet 无法自动连接 iSCSI 卷的问题。有关解决此问题的详细信息,请参阅[此页面](manage-persistent-storage/install-iscsi-volumes.md)。
-### hostPath 卷
+## hostPath 卷
在创建 hostPath 卷之前,你需要在集群配置中设置 [extra_bind](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds/)。这会将路径作为卷安装在你的 kubelet 中,可用于工作负载中的 hostPath 卷。
-### 将 vSphere Cloud Provider 从树内迁移到树外
+## 将 vSphere Cloud Provider 从树内迁移到树外
Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。
有关如何从树内 vSphere 云提供商迁移到树外,以及如何在迁移后管理现有虚拟机,请参阅[此页面](../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
-### 相关链接
+## 相关链接
- [Kubernetes 文档: 存储](https://kubernetes.io/docs/concepts/storage/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md
index 9a4c878d631..69dd77f4d7e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md
@@ -4,7 +4,7 @@ title: GlusterFS 卷
:::note
-本文仅适用于 [RKE 集群](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+本文仅适用于 [RKE 集群](../../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
index 6582e5e0f50..0d0b638fd10 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
@@ -15,11 +15,11 @@ title: 在 Rancher 中动态配置新存储
1. [添加一个存储类并将其配置为使用你的存储](#1-添加一个存储类并将其配置为使用你的存储)
2. [为使用 StatefulSet 部署的 Pod 使用存储类](#2-为使用-statefulset-部署的-pod-使用存储类)
-### 先决条件
+## 先决条件
- 设置持久存储需要`管理卷`的[角色](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色参考)。
- 如果你要为云集群配置存储,则存储和集群主机必须使用相同的云提供商。
-- 必须启用云提供商。有关启用云提供商的详细信息,请参阅[此页面](../../../../../pages-for-subheaders/set-up-cloud-providers.md)。
+- 必须启用云提供商。有关启用云提供商的详细信息,请参阅[此页面](../../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。
- 确保你的存储卷插件可以启用。
默认情况下启用以下存储卷插件:
@@ -38,7 +38,7 @@ title: 在 Rancher 中动态配置新存储
如果你的存储卷插件没有在上述列表中,你需要[使用功能开关来启用不受支持的存储驱动](../../../../advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)。
-### 1. 添加一个存储类并将其配置为使用你的存储
+## 1. 添加一个存储类并将其配置为使用你的存储
这些步骤描述了如何在集群级别设置存储类:
@@ -55,7 +55,7 @@ title: 在 Rancher 中动态配置新存储
有关存储类参数的完整信息,请参阅官方 [Kubernetes 文档](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters)。
-### 2. 为使用 StatefulSet 部署的 Pod 使用存储类
+## 2. 为使用 StatefulSet 部署的 Pod 使用存储类
StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘性标识。在这个 StatefulSet 中,我们将配置一个 VolumeClaimTemplate。StatefulSet 管理的每个 Pod 都将部署一个基于此 VolumeClaimTemplate 的 PersistentVolumeClaim。PersistentVolumeClaim 将引用我们创建的 StorageClass。因此,在部署 StatefulSet 管理的每个 Pod 时,都会使用 PersistentVolumeClaim 中定义的 StorageClass 来绑定到动态配置的存储。
@@ -66,7 +66,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **StatefulSet**。
1. 在**卷声明模板**选项卡上,单击**添加声明模板**。
1. 输入持久卷的名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 点击**启动**。
@@ -80,7 +80,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **⋮ > 编辑配置**,转到使用由 StorageClass 配置的存储的工作负载。
1. 在**卷声明模板**中,单击**添加声明模板**。
1. 输入持久卷名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 单击**保存**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md
index ea7fde37298..aaa2d24f67f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md
@@ -2,7 +2,7 @@
title: iSCSI 卷
---
-在将数据存储在 iSCSI 卷上的 [Rancher 启动的 Kubernetes 集群](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中,你可能会遇到 kubelet 无法自动连接 iSCSI 卷的问题。成此问题的原因很可能是 iSCSI 启动器工具不兼容。你可以在每个集群节点上安装 iSCSI 启动器工具来解决此问题。
+在将数据存储在 iSCSI 卷上的 [Rancher 启动的 Kubernetes 集群](../../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中,你可能会遇到 kubelet 无法自动连接 iSCSI 卷的问题。造成此问题的原因很可能是 iSCSI 启动器工具不兼容。你可以在每个集群节点上安装 iSCSI 启动器工具来解决此问题。
将数据存储到 iSCSI 卷的由 Rancher 启动的 Kubernetes 集群使用 [iSCSI 启动器工具](http://www.open-iscsi.com/),该工具嵌入在 kubelet 的 `rancher/hyperkube` Docker 镜像中。该工具从每个 kubelet(即 _initiator_)发现并发起与 iSCSI 卷(即 _target_)的会话。但是,在某些情况下,initiator 和 target 上安装的 iSCSI 启动器工具的版本可能不匹配,从而导致连接失败。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
index 2232fc9c253..2b5575f7794 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
@@ -16,12 +16,12 @@ title: 设置现有存储
2. [添加一个引用持久存储的 PersistentVolume](#2-添加一个引用持久存储的-persistentvolume)。
3. [为使用 StatefulSet 部署的 Pod 使用存储类](#3-为使用-statefulset-部署的-pod-使用存储类)
-### 先决条件
+## 先决条件
- 要将持久卷创建为 Kubernetes 资源,你必须具有`管理卷`的[角色。](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色参考)
- 如果你要为云集群配置存储,则存储和集群主机必须使用相同的云提供商。
-### 1. 设置持久存储
+## 1. 设置持久存储
在 Rancher 中创建持久卷不会创建存储卷。它只创建映射到现有卷的 Kubernetes 资源。因此,在你可以将持久卷创建为 Kubernetes 资源之前,你必须先配置存储。
@@ -29,7 +29,7 @@ title: 设置现有存储
如果你有一个块存储池并且不想使用云提供商,你可以使用 Longhorn 为 Kubernetes 集群提供持久存储。详情请参见[本页面](../../../../../integrations-in-rancher/longhorn.md)。
-### 2. 添加一个引用持久存储的 PersistentVolume
+## 2. 添加一个引用持久存储的 PersistentVolume
这些步骤描述了如何在 Kubernetes 的集群级别设置 PersistentVolume。
@@ -48,7 +48,7 @@ title: 设置现有存储
**结果**:已创建你的新持久卷。
-### 3. 为使用 StatefulSet 部署的 Pod 使用存储类
+## 3. 为使用 StatefulSet 部署的 Pod 使用存储类
StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘性标识。在这个 StatefulSet 中,我们将配置一个 VolumeClaimTemplate。StatefulSet 管理的每个 Pod 都将部署一个基于此 VolumeClaimTemplate 的 PersistentVolumeClaim。PersistentVolumeClaim 将引用我们创建的 PersistentVolume。因此,在部署 StatefulSet 管理的每个 Pod 时,都会绑定一个 PersistentVolumeClaim 中定义的 PersistentVolume。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
index a262560e90f..4a684b2eabe 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
@@ -234,7 +234,7 @@ title: 通过 AWS EC2 Auto Scaling 组使用 Cluster Autoscaler
我们配置 AWS 后,我们需要创建虚拟机来引导集群:
-* master (etcd+controlplane):根据需要部署三个适当大小的 master 实例。详情请参见[生产就绪集群的建议](../../../../pages-for-subheaders/checklist-for-production-ready-clusters.md)。
+* master (etcd+controlplane):根据需要部署三个适当大小的 master 实例。详情请参见[生产就绪集群的建议](../../kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md)。
* IAM 角色:`K8sMasterRole`
* 安全组:`K8sMasterSg`
* 标签:
@@ -300,7 +300,7 @@ title: 通过 AWS EC2 Auto Scaling 组使用 Cluster Autoscaler
| max-node-provision-time | "15m" | CA 等待节点配置的最长时间 |
| nodes | - | 以云提供商接受的格式设置节点组的最小、最大大小和其他配置数据。可以多次使用。格式是 `::`。 |
| node-group-auto-discovery | - | 节点组自动发现的一个或多个定义。定义表示为 `:[[=]]` |
-| estimator | - | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
+| estimator | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
| expander | "random" | 要在扩容中使用的节点组扩展器的类型。可用值:`["random","most-pods","least-waste","price","priority"]` |
| ignore-daemonsets-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 DaemonSet pod |
| ignore-mirror-pods-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 Mirror pod |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
index a942534d1c6..359d10f52cb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
@@ -9,11 +9,11 @@ title: 节点和节点池
1. 找到要管理其节点的集群,然后单击行末尾的**浏览**按钮。
1. 从左侧导航中选择**节点**。
-不同的集群配置[选项](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)对应不同的可用节点选项。
+不同的集群配置[选项](../kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)对应不同的可用节点选项。
:::note
-如果你想管理 _集群_ 而不是单个节点,请参阅[编辑集群](../../../pages-for-subheaders/cluster-configuration.md)。
+如果你想管理 _集群_ 而不是单个节点,请参阅[编辑集群](../../../reference-guides/cluster-configuration/cluster-configuration.md)。
:::
@@ -32,28 +32,28 @@ title: 节点和节点池
| [下载密钥](#通过-ssh-连接到由基础设施提供商托管的节点) | ✓ | | | | | 下载 SSH 密钥以通过 SSH 连接到节点。 |
| [节点缩放](#扩缩节点) | ✓ | | | ✓ | | 向上或向下扩展节点池中的节点数。 |
-[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md
-[2]: ../../../pages-for-subheaders/use-existing-nodes.md
-[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md
-[4]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
-[5]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
+[1]: ../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md
+[2]: ../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
+[3]: ../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md
+[4]: ../kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
+[5]: ../kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
\* 可通过 View API 访问的删除选项
### 由基础设施提供商托管的节点
-在[托管在基础设施提供商](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)中的节点上配置由 Rancher 启动的 Kubernetes 集群时,你可以使用节点池。
+在[托管在基础设施提供商](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)中的节点上配置由 Rancher 启动的 Kubernetes 集群时,你可以使用节点池。
-如果节点池被编辑,通过[节点池选项](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)配置的集群可以纵向扩容或缩容。
+如果节点池被编辑,通过[节点池选项](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点池)配置的集群可以纵向扩容或缩容。
-如果启用[节点自动替换功能](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点自动替换),节点池还可以自动维护在初始集群配置期间设置的节点规模。该规模决定了 Rancher 为集群维护的 active 节点的数量。
+如果启用[节点自动替换功能](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点自动替换),节点池还可以自动维护在初始集群配置期间设置的节点规模。该规模决定了 Rancher 为集群维护的 active 节点的数量。
-Rancher 使用[节点模板](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)来替换节点池中的节点。每个节点模板都使用云提供商凭证来允许 Rancher 在基础设施提供商中设置节点。
+Rancher 使用[节点模板](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点模板)来替换节点池中的节点。每个节点模板都使用云提供商凭证来允许 Rancher 在基础设施提供商中设置节点。
### 由托管 Kubernetes 提供商配置的节点
-用于管理[由 Kubernetes 提供商托管](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)的节点的选项在 Rancher 中有些限制。例如,你不能使用 Rancher UI 向上或向下缩放节点数量,而是需要直接编辑集群。
+用于管理[由 Kubernetes 提供商托管](../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)的节点的选项在 Rancher 中有些限制。例如,你不能使用 Rancher UI 向上或向下缩放节点数量,而是需要直接编辑集群。
### 注册节点
@@ -72,13 +72,13 @@ Rancher 使用[节点模板](../../../pages-for-subheaders/use-new-nodes-in-an-i
## 在 Rancher API 中查看节点
-选择此选项以查看节点的 [API 端点](../../../pages-for-subheaders/about-the-api.md)。
+选择此选项以查看节点的 [API 端点](../../../api/quickstart.md)。
## 删除节点
使用 **Delete** 从云提供商中删除有缺陷的节点。
-当你删除有缺陷的节点时,如果该节点在节点池中并启用了[节点自动替换](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点自动替换),Rancher 可以自动将其替换为具有相同配置的节点。
+当你删除有缺陷的节点时,如果该节点在节点池中并启用了[节点自动替换](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点自动替换),Rancher 可以自动将其替换为具有相同配置的节点。
:::tip
@@ -88,11 +88,11 @@ Rancher 使用[节点模板](../../../pages-for-subheaders/use-new-nodes-in-an-i
## 扩缩节点
-对于由基础设施提供商托管的节点,你可以使用缩放控件来缩放每个[节点池](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)中的节点数量。此选项不适用于其他集群类型。
+对于由基础设施提供商托管的节点,你可以使用缩放控件来缩放每个[节点池](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点池)中的节点数量。此选项不适用于其他集群类型。
## 通过 SSH 连接到由基础设施提供商托管的节点
-对于[由基础设施提供商托管的节点](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md),你可以选择下载其 SSH 密钥,以便从桌面远程连接到它。
+对于[由基础设施提供商托管的节点](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md),你可以选择下载其 SSH 密钥,以便从桌面远程连接到它。
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,转到要通过 SSH 连接到节点的集群,然后单击集群名称。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
index f8e5d94e510..0353e8cc6e7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
@@ -34,14 +34,14 @@ Kubernetes 支持由同一个物理集群支持的多个虚拟集群。这些虚
你可以将以下资源直接分配给命名空间:
-- [工作负载](../../../pages-for-subheaders/workloads-and-pods.md)
-- [负载均衡器/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md)
-- [服务发现记录](../../new-user-guides/kubernetes-resources-setup/create-services.md)
-- [持久卷声明](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md)
-- [证书](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md)
-- [ConfigMap](../../new-user-guides/kubernetes-resources-setup/configmaps.md)
-- [镜像仓库](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md)
-- [密文](../../new-user-guides/kubernetes-resources-setup/secrets.md)
+- [工作负载](../kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)
+- [负载均衡器/Ingress](../kubernetes-resources-setup/load-balancer-and-ingress-controller/load-balancer-and-ingress-controller.md)
+- [服务发现记录](../kubernetes-resources-setup/create-services.md)
+- [持久卷声明](../manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)
+- [证书](../kubernetes-resources-setup/encrypt-http-communication.md)
+- [ConfigMap](../kubernetes-resources-setup/configmaps.md)
+- [镜像仓库](../kubernetes-resources-setup/kubernetes-and-docker-registries.md)
+- [密文](../kubernetes-resources-setup/secrets.md)
为了在 vanilla Kubernetes 集群中管理权限,集群管理员要为每个命名空间配置基于角色的访问策略。Rancher 在项目级别上分配用户权限,项目中的命名空间会自动继承项目的权限。
@@ -169,14 +169,14 @@ Rancher 在 Kubernetes 之上进行了扩展,除了集群级别之外,还允
### 4. 可选:添加资源配额
-资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
要添加资源配额:
1. 在**资源配额**选项卡中,单击**添加资源**。
-1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
1. 输入**项目限制**和**命名空间默认限制**的值。
-1. **可选**:指定**容器默认资源限制**,这将应用于项目中启动的所有容器。如果资源配额设置了 CPU 或内存限制,则建议使用该参数。可以在单个命名空间或容器级别上覆盖它。有关详细信息,请参阅[容器默认资源限制](../../../pages-for-subheaders/manage-project-resource-quotas.md)。
+1. **可选**:指定**容器默认资源限制**,这将应用于项目中启动的所有容器。如果资源配额设置了 CPU 或内存限制,则建议使用该参数。可以在单个命名空间或容器级别上覆盖它。有关详细信息,请参阅[容器默认资源限制](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
1. 单击**创建**。
**结果**:项目已创建。你可以从集群的**项目/命名空间**视图中查看它。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md
index 569a052d5fe..35b432d9f01 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md
@@ -6,7 +6,7 @@ title: NFS 存储
:::note
-- 如果你已经拥有 NFS 共享,则无需配置新的 NFS 服务器即可在 Rancher 中使用 NFS 卷插件。这样的话,你可以跳过此过程的其余部分并直接[添加存储](../../../../pages-for-subheaders/create-kubernetes-persistent-storage.md)。
+- 如果你已经拥有 NFS 共享,则无需配置新的 NFS 服务器即可在 Rancher 中使用 NFS 卷插件。这样的话,你可以跳过此过程的其余部分并直接[添加存储](../create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)。
- 此教程演示了如何使用 Ubuntu 设置 NFS 服务器。你也应该能够将这些说明用于其他 Linux 发行版(例如 Debian、RHEL、Arch Linux 等)。有关如何使用另一个 Linux 发行版创建 NFS 服务器的官方说明,请参阅发行版的文档。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md
index 1fb1c5d5e06..db36152d271 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md
@@ -6,12 +6,11 @@ title: vSphere 存储
为了在 vSphere 中动态调配存储,必须启用 vSphere 提供商。有关更多信息,请参阅[树外 vSphere](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md) 和[树内 vSphere](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)。
+## 先决条件
-### 先决条件
+为了在 [Rancher Kubernetes Engine (RKE)](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 集群中配置 vSphere 卷,[vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) 必须在[集群选项](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)中显式启用。
-为了在 [Rancher Kubernetes Engine (RKE)](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群中配置 vSphere 卷,[vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) 必须在[集群选项](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)中显式启用。
-
-### 创建一个 StorageClass
+## 创建一个 StorageClass
:::tip
@@ -31,7 +30,7 @@ title: vSphere 存储
5. 可选地,你可以在**参数**下指定存储类的其他属性。有关详细信息,请参阅 [vSphere 存储文档](https://github.com/vmware-archive/vsphere-storage-for-kubernetes/blob/master/documentation/storageclass.md)。
5. 单击**创建**。
-### 创建使用 vSphere 卷的工作负载
+## 创建使用 vSphere 卷的工作负载
1. 在左侧导航栏中,单击**工作负载**。
1. 单击**创建**。
@@ -43,7 +42,7 @@ title: vSphere 存储
7. 在**挂载点**字段中指定路径。这是卷将安装在容器文件系统中的完整路径,例如 `/persistent`。
8. 单击**创建**。
-### 验证卷的持久性
+## 验证卷的持久性
1. 在左侧导航栏中,单击**工作负载 > Pod**。
1. 转到你刚刚创建的工作负载,然后单击 **⋮ > 执行命令行**。
@@ -58,7 +57,7 @@ title: vSphere 存储

-### 为什么使用 StatefulSet 替代 Deployment
+## 为什么使用 StatefulSet 替代 Deployment
对于消耗 vSphere 存储的工作负载,你应该始终使用 [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/),因为这种资源类型旨在解决 VMDK 块存储警告。
@@ -66,7 +65,7 @@ title: vSphere 存储
即使使用仅具有单个副本的 deployment 资源也可能在更新 deployment 时出现死锁情况。如果更新的 pod 被调度到不同的节点,由于 VMDK 仍然连接到另一个节点,因此 pod 将无法启动。
-### 相关链接
+## 相关链接
- [用于 Kubernetes 的 vSphere 存储](https://github.com/vmware-archive/vsphere-storage-for-kubernetes/tree/master/documentation)
- [Kubernetes 持久卷](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md
index 62550604077..be1431337b7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md
@@ -47,7 +47,7 @@ title: 证书轮换
:::
-### 证书轮换
+## 证书轮换
Rancher 启动的 Kubernetes 集群能够通过 UI 轮换自动生成的证书。
@@ -62,7 +62,7 @@ Rancher 启动的 Kubernetes 集群能够通过 UI 轮换自动生成的证书
**结果**:将轮换所选证书,相关服务将重新启动以使用新证书。
-### 补充说明
+## 补充说明
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md
index cfebc6443aa..b8c52cf70c5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md
@@ -2,7 +2,7 @@
title: 加密密钥轮换
---
-### RKE1 加密密钥轮换
+## RKE1 加密密钥轮换
1. 使用以下两个选项之一来启用加密密钥轮换:
@@ -30,7 +30,7 @@ title: 加密密钥轮换
-### RKE2 加密密钥轮换
+## RKE2 加密密钥轮换
_**v2.6.7 新功能**_
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-namespaces.md
index 1c8aeba0767..c91f5830da3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/how-to-guides/new-user-guides/manage-namespaces.md
@@ -8,24 +8,24 @@ title: 命名空间
可以直接分配给命名空间的资源包括:
-- [工作负载](../../pages-for-subheaders/workloads-and-pods.md)
-- [负载均衡器/Ingress](../../pages-for-subheaders/load-balancer-and-ingress-controller.md)
-- [服务发现记录](kubernetes-resources-setup/create-services.md)
-- [持久卷声明](../../pages-for-subheaders/create-kubernetes-persistent-storage.md)
-- [证书](kubernetes-resources-setup/encrypt-http-communication.md)
-- [ConfigMap](kubernetes-resources-setup/configmaps.md)
-- [镜像仓库](kubernetes-resources-setup/kubernetes-and-docker-registries.md)
-- [密文](kubernetes-resources-setup/secrets.md)
+- [工作负载](./kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)
+- [负载均衡器/Ingress](./kubernetes-resources-setup/load-balancer-and-ingress-controller/load-balancer-and-ingress-controller.md)
+- [服务发现记录](./kubernetes-resources-setup/create-services.md)
+- [持久卷声明](./manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)
+- [证书](./kubernetes-resources-setup/encrypt-http-communication.md)
+- [ConfigMap](./kubernetes-resources-setup/configmaps.md)
+- [镜像仓库](./kubernetes-resources-setup/kubernetes-and-docker-registries.md)
+- [密文](./kubernetes-resources-setup/secrets.md)
为了在 vanilla Kubernetes 集群中管理权限,集群管理员要为每个命名空间配置基于角色的访问策略。Rancher 在项目级别上分配用户权限,项目中的命名空间会自动继承项目的权限。
:::note
-如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](manage-namespaces.md),以确保你有权访问该命名空间。
+如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](#创建命名空间),以确保你有权访问该命名空间。
:::
-### 创建命名空间
+## 创建命名空间
创建一个新的命名空间来隔离项目中的应用和资源。
@@ -40,13 +40,13 @@ title: 命名空间
1. 单击**集群 > 项目/命名空间**。
1. 转到要添加命名空间的项目,并单击**创建命名空间**。或者,你也可以转到**不在项目内**以创建不与项目关联的命名空间。
-1. **可选**:如果你的项目具有有效的[资源配额](../../pages-for-subheaders/manage-project-resource-quotas.md),你可以覆盖默认资源**限制**(限制命名空间可以使用的资源)。
+1. **可选**:如果你的项目具有有效的[资源配额](../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md),你可以覆盖默认资源**限制**(限制命名空间可以使用的资源)。
1. 输入**名称**,然后单击**创建**。
**结果**:已将命名空间添加到项目中。你可以开始将集群资源分配给命名空间。
-### 将命名空间移动到另一个项目
+## 将命名空间移动到另一个项目
在某些情况下(例如希望其他团队使用该应用时),集群管理员和成员可能需要将命名空间移动到另一个项目:
@@ -60,14 +60,14 @@ title: 命名空间
:::note 注意事项:
- 不要移动 `System` 项目中的命名空间。移动命名空间可能会对集群网络产生不利影响。
- - 你不能将命名空间移动到已配置[资源配额](../../pages-for-subheaders/manage-project-resource-quotas.md)的项目中。
+ - 你不能将命名空间移动到已配置[资源配额](../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)的项目中。
- 如果你将命名空间从已设置配额的项目移动到未设置配额的项目,则会删除该命名空间的配额。
1. 为新命名空间选择一个新项目,然后单击**移动**。你也可以选择**无**,从而将命名空间从所有项目中移除。
**结果**:你的命名空间已移至其他项目(或从所有项目中移除)。如果命名空间绑定了项目资源,命名空间会释放这些资源,然后绑定新项目的资源。
-### 编辑命名空间资源配额
+## 编辑命名空间资源配额
你可以覆盖命名空间默认限制,从而为特定命名空间提供对更多(或更少)项目资源的访问权限:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/configuration-reference.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/configuration-reference.md
index 7a530783b36..2b85b101377 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/configuration-reference.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/configuration-reference.md
@@ -10,7 +10,7 @@ title: 配置
1. 在**集群**页面上,转到要配置 CIS 扫描的集群,然后单击 **Explore**。
1. 在左侧导航栏中,单击 **CIS Benchmark**。
-### 扫描
+## 扫描
扫描是用来根据定义的配置文件,在集群上触发 CIS 扫描的。扫描完成后会创建一份报告。
@@ -27,7 +27,7 @@ spec:
scanProfileName: rke-profile-hardened
```
-### 配置文件
+## 配置文件
配置文件包含 CIS 扫描的配置,包括要使用的 Benchmark 测试版本以及要在该 Benchmark 测试中跳过的测试。
@@ -62,7 +62,7 @@ spec:
- "1.1.21"
```
-### Benchmark 版本
+## Benchmark 版本
Benchmark 版本是指使用 `kube-bench` 运行的 Benchmark 名称,以及该 Benchmark 的有效配置参数。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/custom-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/custom-benchmark.md
index 6818ac56118..8cd3c582736 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/custom-benchmark.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cis-scans/custom-benchmark.md
@@ -13,7 +13,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
按照以下所有步骤添加自定义 Benchmark 版本并使用它运行扫描。
-### 1. 准备自定义 Benchmark 版本 ConfigMap
+## 1. 准备自定义 Benchmark 版本 ConfigMap
要创建自定义 Benchmark 版本,你需要先创建一个包含 Benchmark 版本配置文件的 ConfigMap,并将其上传到要运行扫描的 Kubernetes 集群。
@@ -38,7 +38,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
kubectl create configmap -n foo --from-file=
```
-### 2. 将自定义 Benchmark 版本添加到集群
+## 2. 将自定义 Benchmark 版本添加到集群
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,转到要添加自定义 Benchmark 的集群,然后单击 **Explore**。
@@ -50,7 +50,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
1. 添加最低和最高 Kubernetes 版本限制(如果有)。
1. 单击**创建**。
-### 3. 为自定义 Benchmark 版本创建新配置文件
+## 3. 为自定义 Benchmark 版本创建新配置文件
要使用你的自定义 Benchmark 版本运行扫描,你需要添加一个指向此 Benchmark 版本的新配置文件:
@@ -62,7 +62,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
1. 在下拉列表中选择 Benchmark 版本。
1. 单击**创建**。
-### 4. 使用自定义 Benchmark 版本运行扫描
+## 4. 使用自定义 Benchmark 版本运行扫描
指向你的自定义 Benchmark 版本的 `foo` 配置文件创建完成后,你可以创建一个新的扫描,从而在 Benchmark 版本中运行自定义测试。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md
index 8431d4fbd66..5a6fed5b32a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md
@@ -2,7 +2,7 @@
title: 先决条件
---
-### 1. 设置许可证管理器和购买支持
+## 1. 设置许可证管理器和购买支持
首先,完成许可证管理器设置的[第一步](https://docs.aws.amazon.com/license-manager/latest/userguide/getting-started.html)。
然后,转到 AWS Marketplace。找到 “Rancher Premium Support Billing Container Starter Pack”。最后,购买至少一项 Entitlement。
@@ -11,7 +11,7 @@ title: 先决条件
> **注意**:每项 Entitlement 都对一定数量的节点授予访问支持的权限。你可以后续根据需要购买更多许可证。
-### 2. 创建 EKS 集群
+## 2. 创建 EKS 集群
按照 [Rancher 文档](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md)创建 EKS 集群。进行到[安装 Rancher Helm Chart](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md#8-安装-rancher-helm-chart)(最后一步)时,**停止并返回此页面**。该集群需要满足以下要求:
- EKS 1.22 版本。
@@ -20,7 +20,7 @@ title: 先决条件
- 集群中的每个节点都可以访问许可证管理器服务。
- 集群中的每个节点都可以访问 STS 服务的全局端点。
-### 3. 安装 Rancher
+## 3. 安装 Rancher
除了在 [Rancher 文档](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md#8-安装-rancher-helm-chart)中指定的 Rancher 安装选项外,你还需要启用其它指标。
你可以通过 Helm CLI 使用以下选项来完成:
@@ -39,11 +39,11 @@ extraEnv:
你还需要安装 Rancher 2.6.7 或更高版本。
-### 4. 创建 OIDC 提供程序
+## 4. 创建 OIDC 提供程序
按照 [AWS 文档](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)为上一节中指定的集群创建 OIDC 提供程序。
-### 5. 创建 IAM 角色
+## 5. 创建 IAM 角色
CSP Adapter 需要 IAM 角色才能签入/签出 Entitlement。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
index 831bf952a6d..8e3d743ef27 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
@@ -4,7 +4,7 @@ title: 安装 Adapter
> **重要提示**:如果你尝试重新安装 Adapter,你可能会在长达一小时的时间内收到不合规的错误消息。
-### Rancher 与 Adapter 的兼容性矩阵
+## Rancher 与 Adapter 的兼容性矩阵
:::note 重要提示:
@@ -23,7 +23,7 @@ title: 安装 Adapter
| v2.7.5 | v2.0.2 |
-### 1. 获取对 Local 集群的访问权限
+## 1. 获取对 Local 集群的访问权限
> **注意**:只有管理员用户才能访问 Local 集群。因为 CSP Adapter 必须安装在 Local 集群中,所以此安装必须由管理员用户执行。
@@ -33,7 +33,7 @@ title: 安装 Adapter
export KUBECONFIG=$TOKEN_PATH
```
-### 2. 创建 Adapter 命名空间
+## 2. 创建 Adapter 命名空间
创建要安装 Adapter 的命名空间:
@@ -41,7 +41,7 @@ export KUBECONFIG=$TOKEN_PATH
kubectl create ns cattle-csp-adapter-system
```
-### 3. 创建证书密文
+## 3. 创建证书密文
Adapter 需要访问 Rancher 用来与 Rancher Server 通信的根 CA。有关 Rancher 支持的证书选项的更多信息,请参阅 [Chart 选项页面](../../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md)。
@@ -63,7 +63,7 @@ kubectl -n cattle-csp-adapter-system create secret generic tls-ca-additional --f
> **重要提示**:不要更改文件名或创建的密文的名称,否则可能会导致 Adapter 运行出错。
-### 4. 安装 Chart
+## 4. 安装 Chart
首先,使用以下命令添加 `rancher/charts` 仓库:
@@ -134,7 +134,7 @@ helm install rancher-csp-adapter rancher-charts/rancher-csp-adapter -f values.ya
-### 5. 管理证书更新
+## 5. 管理证书更新
如果你在[步骤 3](#3-创建证书密文) 中创建了一个用于存储自定义证书的密文,则随着证书的轮换,你将需要更新此密文。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md
index f290d881ae9..ddc19f8353e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md
@@ -2,19 +2,19 @@
title: 卸载 Adapter
---
-### 1. 使用 Helm 卸载 Adapter Chart:
+## 1. 使用 Helm 卸载 Adapter Chart:
```bash
helm uninstall rancher-csp-adapter -n cattle-csp-adapter-system
```
-### 2. 删除为 Adapter 创建的命名空间:
+## 2. 删除为 Adapter 创建的命名空间:
```bash
kubectl delete ns cattle-csp-adapter-system
```
-### 3. (可选)删除未完成的用户通知:
+## 3. (可选)删除未完成的用户通知:
```bash
kubectl delete RancherUserNotification csp-compliance
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/supportconfig.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/supportconfig.md
index f5f0d722487..d34958468e7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/supportconfig.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cloud-marketplace/supportconfig.md
@@ -8,7 +8,7 @@ title: Supportconfig Bundle
> **注意**:无论采用何种方法,只有管理员可以生成/下载 Supportconfig Bundle。
-### 通过 Rancher 访问
+## 通过 Rancher 访问
首先,点击汉堡菜单。然后单击 `Get Support` 按钮。
@@ -20,7 +20,7 @@ title: Supportconfig Bundle

-### 不通过 Rancher 进行访问
+## 不通过 Rancher 进行访问
首先,为安装 Rancher 的集群生成 kubeconfig。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cluster-api/overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cluster-api/overview.md
index 5da4470bfd6..531464d5d9b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cluster-api/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/cluster-api/overview.md
@@ -185,7 +185,7 @@ stringData:
:::note
-请记住,如果使用此安装选项,你必须自行管理 CAPI Operator 的安装。你可以参照 Rancher Turtles 文档中的 [CAPI Operator 指南](https://turtles.docs.rancher.com/tasks/capi-operator/intro)
+请记住,如果使用此安装选项,你必须自行管理 CAPI Operator 的安装。你可以参照 Rancher Turtles 文档中的 [CAPI Operator 指南](https://turtles.docs.rancher.com/contributing/install_capi_operator)
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/fleet/overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/fleet/overview.md
index cedc5ee7a36..b26b821dc23 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/fleet/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/fleet/overview.md
@@ -12,7 +12,7 @@ Fleet 是 Rancher 的一个独立项目,可以通过 Helm 安装在任何 Kube
## 架构
-有关 Fleet 如何运作的信息,请参阅[架构](./architecture)页面。
+有关 Fleet 如何运作的信息,请参阅[架构](./architecture.md)页面。
## 在 Rancher UI 中访问 Fleet
@@ -40,7 +40,7 @@ Fleet 预安装在 Rancher 中,并由 Rancher UI 中的**持续交付**选项
## Windows 支持
-有关对具有 Windows 节点的集群的支持的详细信息,请参阅 [Windows 支持](./windows-support)页面。
+有关对具有 Windows 节点的集群的支持的详细信息,请参阅 [Windows 支持](./windows-support.md)页面。
## GitHub 仓库
@@ -48,7 +48,7 @@ Fleet Helm charts 可在[此处](https://github.com/rancher/fleet/releases)获
## 在代理后使用 Fleet
-有关在代理后面使用 Fleet 的详细信息,请参阅[在代理后使用 Fleet](./use-fleet-behind-a-proxy) 页面。
+有关在代理后面使用 Fleet 的详细信息,请参阅[在代理后使用 Fleet](./use-fleet-behind-a-proxy.md) 页面。
## Helm Chart 依赖
@@ -58,7 +58,7 @@ git 仓库中的 Helm Chart 必须在 Chart 子目录中包含其依赖。 你
## 故障排除
-- **已知问题**:Fleet gitrepos 的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator) 创建的备份或恢复中。一旦有永久的解决方案,我们将更新社区内容。
+- **已知问题**:Fleet gitrepos 的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-安装-rancher-backup-operator) 创建的备份或恢复中。一旦有永久的解决方案,我们将更新社区内容。
- **临时解决方法**:默认情况下,用户定义的密文不会在 Fleet 中备份。如果执行灾难恢复或将 Rancher 迁移到新集群,则有必要重新创建密文。要修改 ResourceSet 以包含要备份的额外资源,请参阅文档[此处](https://github.com/rancher/backup-restore-operator#user-flow)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester.md
index 81823b2999a..eb6bf4742bb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester.md
@@ -4,9 +4,9 @@ title: Harvester 集成
Harvester 是 Rancher 2.6.1 新增的功能,[Harvester](https://docs.harvesterhci.io/) 是基于 Kubernetes 构建的开源超融合基础架构 (HCI) 软件。Harvester 安装在裸金属服务器上,提供集成的虚拟化和分布式存储功能。虽然 Harvester 使用 Kubernetes 运行,但它不需要用户了解 Kubernetes 概念,因此是一个更加用户友好的应用。
-### 功能开关
+## 功能开关
-你可以使用 Harvester 的功能开关来管理 Harvester 在 Rancher 虚拟化管理页面的访问,用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。Harvester 的功能开关是默认启用的。如需了解 Rancher 中功能开关的更多详细信息,请单击[此处](../pages-for-subheaders/enable-experimental-features.md)。
+你可以使用 Harvester 的功能开关来管理 Harvester 在 Rancher 虚拟化管理页面的访问,用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。Harvester 的功能开关是默认启用的。如需了解 Rancher 中功能开关的更多详细信息,请单击[此处](../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
要导航到 Harvester 集群,请单击 **☰ > 虚拟化管理**。在 **Harvester 集群**页面中,单击集群以转到该 Harvester 集群的视图。
@@ -18,15 +18,15 @@ Harvester 是 Rancher 2.6.1 新增的功能,[Harvester](https://docs.harvester
* 用户只能在**虚拟化管理**页面上导入 Harvester 集群。在**集群管理**页面上导入集群是不支持的,而且会出现警告。建议你返回**虚拟化管理**页面执行此操作。
-### Harvester 主机驱动
+## Harvester 主机驱动
[Harvester 主机驱动](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/) 通常可用于 Rancher 中的 RKE 和 RKE2 选项。无论 Harvester 功能开关是否启用,主机驱动都是可用的。请注意,默认情况下主机驱动是关闭的。用户只能通过**集群管理**页面在 Harvester 上创建 RKE 或 RKE2 集群。
Harvester 允许通过 Harvester UI 上传和显示 `.ISO` 镜像,但 Rancher UI 不支持。这是因为 `.ISO` 镜像通常需要额外的设置,这会干扰干净的部署(即无需用户干预),并且它们通常不用于云环境。
-如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../pages-for-subheaders/about-provisioning-drivers.md#主机驱动)。
+如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#主机驱动)。
-### 端口要求
+## 端口要求
可以在[此处](https://docs.harvesterhci.io/v1.1/install/requirements#networking)找到 Harvester 集群的端口要求。
@@ -37,7 +37,7 @@ Harvester 允许通过 Harvester UI 上传和显示 `.ISO` 镜像,但 Rancher
对于其他集群(例如 K3s 和 RKE1)的其他端口要求,请参阅[这些文档](https://docs.harvesterhci.io/v1.1/install/requirements/#guest-clusters)。
-### 限制
+## 限制
---
**仅适用于 Rancher v2.6.1 和 v2.6.2**:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester/overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester/overview.md
index ac720bec060..b5ee0ff89eb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/harvester/overview.md
@@ -8,7 +8,7 @@ title: 概述
[Harvester](https://docs.harvesterhci.io/) 是 Rancher v2.6.1 新增的功能,是基于 Kubernetes 构建的开源超融合基础架构(HCI)软件。Harvester 安装在裸金属服务器上,提供集成的虚拟化和分布式存储功能。虽然 Harvester 使用 Kubernetes 运行,但它不需要用户了解 Kubernetes 概念,这使得它更加用户友好。
-### 功能开关
+## 功能开关
Harvester 功能开关用于管理对 Rancher 中虚拟化管理(VM)页面的访问,用户可以直接导航到 Harvester 集群并访问 Harvester UI。Harvester 的功能开关默认启用。如需了解 Rancher 中功能开关的更多详细信息,请单击[此处](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
@@ -22,15 +22,15 @@ Harvester 功能开关用于管理对 Rancher 中虚拟化管理(VM)页面
- 用户只能在虚拟化管理页面上导入 Harvester 集群。不支持在集群管理页面上导入集群,并且会出现警告,建议你返回虚拟化管理页面执行此操作。
-### Harvester 主机驱动
+## Harvester 主机驱动
[Harvester 主机驱动](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/)通常可用于 Rancher 中的 RKE 和 RKE2 选项。无论 Harvester 功能开关是否启用,主机驱动都是可用的。请注意,主机驱动默认处于关闭状态。用户只能通过集群管理页面在 Harvester 上创建 RKE 或 RKE2 集群。
Harvester 允许通过 Harvester UI 上传和显示 `.ISO` 镜像,但 Rancher UI 是不支持的。这是因为 `.ISO` 镜像通常需要额外的设置,这会干扰干净的部署(即无需用户干预),并且它们通常不用于云环境。
-如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers#主机驱动)。
+如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#主机驱动)。
-### 端口要求
+## 端口要求
Harvester 集群的端口要求可以在[此处](https://docs.harvesterhci.io/v1.1/install/requirements#networking)找到。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/integrations-in-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/integrations-in-rancher.md
new file mode 100644
index 00000000000..24f35f24e37
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/integrations-in-rancher.md
@@ -0,0 +1,18 @@
+---
+title: Rancher 中的集成
+---
+
+
+
+
+
+Prime 是 Rancher 生态系统的企业级产品,具有更高的安全性、更长的生命周期和对 Prime 专有文档的访问权限。Rancher Prime 安装资产托管在受信任的 SUSE 注册表上,由 Rancher 拥有和管理。受信任的 Prime 注册表仅包括经过社区测试的稳定版本。
+
+Prime 还提供生产支持选项,以及根据你的商业需求定制的订阅附加组件。
+
+要了解更多信息并开始使用 Rancher Prime,请访问[本页](https://www.rancher.com/quick-start)。
+
+import DocCardList from '@theme/DocCardList';
+import { useCurrentSidebarCategory } from '@docusaurus/theme-common/internal';
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/integrations-in-rancher.mdx b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/integrations-in-rancher.mdx
deleted file mode 100644
index c0a824cc9da..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/integrations-in-rancher.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: Rancher 中的集成
----
-
-
-
-
-
-import { Card, CardSection } from "@site/src/components/CardComponents";
-import { RocketRegular } from "@fluentui/react-icons";
-
-Prime 是 Rancher 生态系统的企业级产品,具有更高的安全性、更长的生命周期和对 Prime 专有文档的访问权限。Rancher Prime 安装资产托管在受信任的 SUSE 注册表上,由 Rancher 拥有和管理。受信任的 Prime 注册表仅包括经过社区测试的稳定版本。
-
-Prime 还提供生产支持选项,以及根据你的商业需求定制的订阅附加组件。
-
-要了解更多信息并开始使用 Rancher Prime,请访问[本页](https://www.rancher.com/quick-start)。
-
- }>
-
-
-
-
-
-
-
-
-
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/configuration-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/configuration-options.md
index 800b4e576da..e0097356be2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/configuration-options.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/configuration-options.md
@@ -6,15 +6,15 @@ title: 配置选项
-### Egress 支持
+## Egress 支持
默认情况下,Egress 网关是禁用的,但你可以在安装或升级时使用 values.yaml 或[覆盖文件](#覆盖文件)启用它。
-### 启用自动 Sidecar 注入
+## 启用自动 Sidecar 注入
默认情况下,自动 sidecar 注入是禁用的。要启用此功能,请在安装或升级时在 values.yaml 中设置 `sidecarInjectorWebhook.enableNamespacesByDefault=true`。这会自动将 Istio sidecar 注入到所有已部署的新命名空间。
-### 覆盖文件
+## 覆盖文件
覆盖文件用于为 Istio 进行更广泛的配置。它允许你更改 [IstioOperator API](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/) 中可用的任何值。你可以自定义默认安装以满足你的需求。
@@ -22,7 +22,7 @@ title: 配置选项
有关覆盖文件的更多信息,请参阅 [Istio 文档](https://istio.io/latest/docs/setup/install/istioctl/#configure-component-settings)
-### 选择器和抓取配置
+## 选择器和抓取配置
Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=false`,即在默认情况下跨所有命名空间进行监控。这样,你可以查看部署在具有 `istio-injection=enabled` 标签的命名空间中的资源的流量、指标和图。
@@ -30,14 +30,14 @@ Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=f
详情请参阅[本节](selectors-and-scrape-configurations.md)。
-### 在具有 Pod 安全策略的情况下启用 Istio
+## 在具有 Pod 安全策略的情况下启用 Istio
详情请参阅[本节](pod-security-policies.md)。
-### 在 RKE2 集群上安装 Istio 的其他步骤
+## 在 RKE2 集群上安装 Istio 的其他步骤
详情请参阅[本节](install-istio-on-rke2-cluster.md)。
-### 项目网络隔离的其他步骤
+## 项目网络隔离的其他步骤
详情请参阅[本节](project-network-isolation.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/pod-security-policies.md
index 2ceed9acdce..44370e67421 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/pod-security-policies.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/pod-security-policies.md
@@ -19,7 +19,7 @@ Istio CNI 插件不再要求每个应用 pod 具有特权 `NET_ADMIN` 容器。
2. [启用 CNI](#2-启用-cni)
3. [验证 CNI 是否正常工作](#3-验证-cni-是否正常工作)
-### 1. 将 PodSecurityPolicy 设置为不受限制
+## 1. 将 PodSecurityPolicy 设置为不受限制
不受限制的 PSP 支持安装 Istio。
@@ -31,7 +31,7 @@ Istio CNI 插件不再要求每个应用 pod 具有特权 `NET_ADMIN` 容器。
1. 找到**项目: System**,然后选择 **⋮ > 编辑配置**。
1. 将 Pod 安全策略选项更改为不受限制,然后单击**保存**。
-### 2. 启用 CNI
+## 2. 启用 CNI
通过 **Apps** 安装或升级 Istio 时:
@@ -47,7 +47,7 @@ istio_cni.enabled: true
在集群中启用 CNI 后,Istio 应该能成功安装。
-### 3. 验证 CNI 是否正常工作
+## 3. 验证 CNI 是否正常工作
通过部署[示例应用](https://istio.io/latest/docs/examples/bookinfo/)或部署你自己的应用,来验证 CNI 是否正常工作。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md
index 5a40d032482..9828c2b351d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md
@@ -9,7 +9,7 @@ Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=f
如果你想将 Prometheus 限制为特定的命名空间,请设置 `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`。完成此操作后,你需要添加其他配置来继续监控你的资源。
-### 通过将 ignoreNamespaceSelectors 设置为 True 来限制对特定命名空间的监控
+## 通过将 ignoreNamespaceSelectors 设置为 True 来限制对特定命名空间的监控
要限制对特定命名空间的监控,你需要编辑 `ignoreNamespaceSelectors` Helm Chart 选项。你可以在安装或升级 Monitoring Helm Chart 时配置此选项:
@@ -18,14 +18,14 @@ Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=f
**结果**:Prometheus 将仅用于特定命名空间。换言之,你需要设置以下配置之一才能继续在各种仪表板中查看数据。
-### 让 Prometheus 检测其他命名空间中的资源
+## 让 Prometheus 检测其他命名空间中的资源
如果设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`,则有两种方法让 Prometheus 检测其他命名空间中的资源:
- **监控特定的命名空间**:在命名空间中添加一个 ServiceMonitor 或 PodMonitor 以及要抓取的目标。
- **跨命名空间监控**:将 `additionalScrapeConfig` 添加到你的 rancher-monitoring 实例,从而抓取所有命名空间中的所有目标。
-### 监控特定命名空间:创建 ServiceMonitor 或 PodMonitor
+## 监控特定命名空间:创建 ServiceMonitor 或 PodMonitor
此选项用于定义在特定命名空间中要监控的服务或 pod。
@@ -81,7 +81,7 @@ spec:
targetLabel: pod_name
```
-### 跨命名空间监控:将 ignoreNamespaceSelectors 设置为 False
+## 跨命名空间监控:将 ignoreNamespaceSelectors 设置为 False
此设置为 Prometheus 提供额外的抓取配置来实现跨命名空间监控。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/cpu-and-memory-allocations.md
index 3a19c21dcc5..c1aad59c6ee 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/cpu-and-memory-allocations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/istio/cpu-and-memory-allocations.md
@@ -41,7 +41,7 @@ Kubernetes 中的资源请求指的是,除非该节点至少具有指定数量
1. 在左侧导航栏中,点击 **Apps**。
1. 点击**已安装的应用**。
1. 转到 `istio-system` 命名空间。在某个 Istio 工作负载中(例如 `rancher-istio`),点击**⋮ > 编辑/升级**。
-1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](cpu-and-memory-allocations.md#编辑覆盖文件)。
+1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](./configuration-options/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](#编辑覆盖文件)。
1. 更改 CPU 或内存分配、调度各个组件的节点,或节点容忍度。
1. 点击**升级**。然后,更改就能启用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md
index b45cce6b4d7..8752645ed96 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md
@@ -13,7 +13,7 @@ K3s 是一款轻量级、完全兼容的 Kubernetes 发行版,专为一系列
### K3s 与 Rancher
- Rancher 允许在一系列平台上轻松配置 K3s,包括 Amazon EC2、DigitalOcean、Azure、vSphere 或现有服务器。
-- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup#cluster-management-capabilities-by-cluster-type)。
+- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md#不同类型集群的管理功能)。
## RKE2
@@ -31,4 +31,4 @@ RKE2 的主要特性包括:
## RKE2 与 Rancher
- Rancher 允许在一系列平台上轻松配置 RKE2,包括 Amazon EC2、DigitalOcean、Azure、vSphere 或现有服务器。
-- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup#cluster-management-capabilities-by-cluster-type)。
+- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md#不同类型集群的管理功能)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md
index ab7f461150a..3f02e5fc4f3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md
@@ -4,7 +4,7 @@ title: Flows 和 ClusterFlows
有关如何配置 `Flow` 和 `ClusterFlow` 的完整详细信息,请参阅 [Logging Operator 文档](https://kube-logging.github.io/docs/configuration/flow/)。
-有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../../../pages-for-subheaders/logging.md#日志缓冲区导致-pod-过载)。
+有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../logging.md#日志缓冲区导致-pod-过载)。
## Flows
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md
index 1ad5b65f48b..8e7d608e4a5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md
@@ -4,7 +4,7 @@ title: Outputs 和 ClusterOutputs
有关如何配置 `Flow` 和 `ClusterFlow` 的完整详细信息,请参阅 [Logging Operator 文档](https://kube-logging.github.io/docs/configuration/flow/)。
-有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../../../pages-for-subheaders/logging.md#日志缓冲区导致-pod-过载)。
+有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../logging.md#日志缓冲区导致-pod-过载)。
## Outputs
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-architecture.md
index c7e0510ebe9..cbaeda896e3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-architecture.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-architecture.md
@@ -6,7 +6,7 @@ title: 架构
有关 Logging Operator 工作原理的更多详细信息,请参阅[官方文档](https://kube-logging.github.io/docs/#architecture)。
-### Logging Operator 工作原理
+## Logging Operator 工作原理
Logging Operator 自动部署和配置 Kubernetes 日志流水线。它会在每个节点上部署和配置一个 Fluent Bit DaemonSet,从而收集节点文件系统中的容器和应用程序日志。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-helm-chart-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-helm-chart-options.md
index 5c08fb1166a..40bae8aa4de 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-helm-chart-options.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/logging-helm-chart-options.md
@@ -2,7 +2,7 @@
title: rancher-logging Helm Chart 选项
---
-### 启用/禁用 Windows 节点 Logging
+## 启用/禁用 Windows 节点 Logging
要启用或禁用 Windows 节点 Logging,你可以在 `values.yaml` 中将 `global.cattle.windows.enabled` 设置为 `true` 或 `false`。
@@ -17,7 +17,7 @@ title: rancher-logging Helm Chart 选项
:::
-### 使用自定义 Docker 根目录
+## 使用自定义 Docker 根目录
如果使用了自定义 Docker 根目录,你可以在 `values.yaml` 中设置 `global.dockerRootDirectory`。
@@ -27,11 +27,11 @@ title: rancher-logging Helm Chart 选项
如果集群中有任何 Windows 节点,则更改将不适用于这些节点。
-### 为自定义污点添加 NodeSelector 设置和容忍度
+## 为自定义污点添加 NodeSelector 设置和容忍度
你可以添加 `nodeSelector` 设置,并通过编辑 Logging Helm Chart 值来添加其他`容忍度`。有关详细信息,请参阅[此页面](taints-and-tolerations.md)。
-### 启用 Logging 应用程序以使用 SELinux
+## 启用 Logging 应用程序以使用 SELinux
:::note 要求:
@@ -41,11 +41,11 @@ Logging v2 已在 RHEL/CentOS 7 和 8 上使用 SELinux 进行了测试。
[安全增强型 Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) 是对 Linux 的安全增强。被政府机构使用之后,SELinux 已成为行业标准,并在 CentOS 7 和 8 上默认启用。
-要配合使用 Logging V2 与 SELinux,我们建议你根据[此说明](../../pages-for-subheaders/selinux-rpm.md)安装 `rancher-selinux` RPM。
+要配合使用 Logging V2 与 SELinux,我们建议你根据[此说明](../../reference-guides/rancher-security/selinux-rpm/selinux-rpm.md)安装 `rancher-selinux` RPM。
然后,在安装 Logging 应用程序时,在 `values.yaml` 中将 `global.seLinux.enabled` 更改为 `true`,使 Chart 支持 SELinux。
-### 其他日志来源
+## 其他日志来源
默认情况下,Rancher 会收集所有类型集群的 [controlplane 组件](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components)和[节点组件](https://kubernetes.io/docs/concepts/overview/components/#node-components)的日志。
@@ -68,7 +68,7 @@ Logging v2 已在 RHEL/CentOS 7 和 8 上使用 SELinux 进行了测试。
如果你已经使用了云提供商的日志解决方案,例如 AWS CloudWatch 或 Google Cloud Operations Suite(以前称为 Stackdriver),由于原生解决方案可以不受限制地访问所有日志,因此你无需启用此选项。
-### Systemd 配置
+## Systemd 配置
在 Rancher Logging 中,你必须为 K3s 和 RKE2 Kubernetes 发行版配置 `SystemdLogPath`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/taints-and-tolerations.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/taints-and-tolerations.md
index 16a6a08fd26..d109b29f8ea 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/taints-and-tolerations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/logging/taints-and-tolerations.md
@@ -15,8 +15,7 @@ title: 处理污点和容忍度
- [Rancher 日志堆栈中的默认实现](#rancher-日志堆栈中的默认实现)
- [为自定义污点添加 NodeSelector 设置和容忍度](#为自定义污点添加-nodeselector-设置和容忍度)
-
-### Rancher 日志堆栈中的默认实现
+## Rancher 日志堆栈中的默认实现
默认情况下,Rancher 使用 `cattle.io/os=linux` 来将污点应用到所有 Linux 节点,而不影响 Windows 节点。
日志堆栈 pod 具有针对此污点的`容忍度`,因此它们能够运行在 Linux 节点上。
@@ -43,7 +42,7 @@ spec:
你可以对 Rancher 现有的污点或你自己的自定义污点执行相同的操作。
-### 为自定义污点添加 NodeSelector 设置和容忍度
+## 为自定义污点添加 NodeSelector 设置和容忍度
如果要添加你自己的 `nodeSelector` 设置,或者要为其他污点添加 `容忍度`,你可以将以下内容传递给 Chart 的值:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn.md
index ea34d0938ae..53ffddc28d2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn.md
@@ -21,7 +21,7 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现

-### 使用 Rancher 安装 Longhorn
+## 使用 Rancher 安装 Longhorn
1. 满足所有[安装要求](https://longhorn.io/docs/latest/deploy/install/#installation-requirements)。
1. 转到要安装 Longhorn 的集群。
@@ -33,14 +33,14 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现
**结果**:Longhorn 已部署到 Kubernetes 集群中。
-### 从 Rancher UI 访问 Longhorn
+## 从 Rancher UI 访问 Longhorn
1. 转到安装了 Longhorn 的集群。在左侧导航菜单中,单击 **Longhorn**。
1. 在此页面上,你可以编辑 Longhorn 管理的 Kubernetes 资源。要查看 Longhorn UI,请单击**概述**中的 **Longhorn** 按钮。
**结果**:你将转到 Longhorn UI,你可以在那里管理 Longhorn 卷及其在 Kubernetes 集群中的副本,还可以查看位于另一个 Kubernetes 集群或 S3 中的 Longhorn 存储辅助备份。
-### 从 Rancher UI 卸载 Longhorn
+## 从 Rancher UI 卸载 Longhorn
1. 转到安装了 Longhorn 的集群,然后单击 **Apps**。
1. 点击**已安装的应用**。
@@ -49,15 +49,15 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现
**结果**:Longhorn 已被卸载。
-### GitHub 仓库
+## GitHub 仓库
Longhorn 项目在[此处](https://github.com/longhorn/longhorn)。
-### 文档
+## 文档
Longhorn 文档在[此处](https://longhorn.io/docs/)。
-### 架构
+## 架构
Longhorn 为每个卷创建专用的存储控制器,并在存储在多个节点上的多个副本之间同步复制该卷。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn/overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn/overview.md
index b5298c3c21d..dd02b06d713 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/longhorn/overview.md
@@ -25,7 +25,7 @@ Longhorn 是免费的开源软件。它最初由 Rancher Labs 开发,现在被

-### 使用 Rancher 安装 Longhorn
+## 使用 Rancher 安装 Longhorn
1. 满足所有[安装要求](https://longhorn.io/docs/latest/deploy/install/#installation-requirements)。
1. 转到要安装 Longhorn 的集群。
@@ -37,14 +37,14 @@ Longhorn 是免费的开源软件。它最初由 Rancher Labs 开发,现在被
**结果**:Longhorn 已部署到 Kubernetes 集群中。
-### 从 Rancher UI 访问 Longhorn
+## 从 Rancher UI 访问 Longhorn
1. 转到安装了 Longhorn 的集群。在左侧导航菜单中,单击 **Longhorn**。
1. 在此页面上,你可以编辑 Longhorn 管理的 Kubernetes 资源。要查看 Longhorn UI,请单击**概述**中的 **Longhorn** 按钮。
**结果**:你将转到 Longhorn UI,在这里你可以管理 Kubernetes 集群中的 Longhorn 卷及其副本,以及可能存在于另一个 Kubernetes 集群或 S3 中的 Longhorn 存储辅助备份。
-### 从 Rancher UI 卸载 Longhorn
+## 从 Rancher UI 卸载 Longhorn
1. 转到安装了 Longhorn 的集群,然后单击 **Apps**。
1. 点击**已安装的应用**。
@@ -53,15 +53,15 @@ Longhorn 是免费的开源软件。它最初由 Rancher Labs 开发,现在被
**结果**:Longhorn 已被卸载。
-### GitHub 仓库
+## GitHub 仓库
Longhorn 项目可在[此处](https://github.com/longhorn/longhorn)获取。
-### 文档
+## 文档
Longhorn 文档在[此处](https://longhorn.io/docs/)。
-### 架构
+## 架构
Longhorn 为每个卷创建专用的存储控制器,并在多个节点上存储的多个副本之间同步复制该卷。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
index cebb2e4323f..b181ee890cb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
@@ -15,7 +15,7 @@ description: Prometheus 允许你查看来自不同 Rancher 和 Kubernetes 对
使用 `rancher-monitoring` 应用程序,你可以快速部署领先的开源监控和告警解决方案到你的集群上。
-### 功能
+## 功能
Prometheus 支持查看 Rancher 和 Kubernetes 对象的指标。通过使用时间戳,Prometheus 能让你通过 Rancher UI 或 Grafana(与 Prometheus 一起部署的分析查看平台)以更容易阅读的图表和视觉形式来查询和查看这些指标。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
index dfeefda6f97..978ff27d4f3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
@@ -107,7 +107,7 @@ Monitoring 还会创建其他 `ClusterRole`,这些角色默认情况下不会
| 角色 | 用途 |
| ------------------------------| ---------------------------|
-| monitoring-ui-view | _自 Monitoring v2 14.5.100+ 起可用_ 此 ClusterRole 允许用户在 Rancher UI 中查看指定集群的指标图。这是通过授予对外部监控 UI 的只读访问权限来实现的。具有此角色的用户有权限列出 Prometheus、Alertmanager 和 Grafana 端点,并通过 Rancher 代理向 Prometheus、Grafana 和 Alertmanager UI 发出 GET 请求。 |
+| monitoring-ui-view | _自 Monitoring v2 14.5.100+ 起可用_ 此 ClusterRole 允许用户在 Rancher UI 中查看指定集群的指标图。这是通过授予对外部监控 UI 的只读访问权限来实现的。具有此角色的用户有权限列出 Prometheus、Alertmanager 和 Grafana 端点,并通过 Rancher 代理向 Prometheus、Grafana 和 Alertmanager UI 发出 GET 请求。 |
### 使用 kubectl 分配 Role 和 ClusterRole
@@ -203,7 +203,7 @@ Rancher 部署的默认角色(即 cluster-owner、cluster-member、project-own
| Rancher 角色 | Kubernetes ClusterRole | 可用 Rancher 版本 | 可用 Monitoring V2 版本 |
|--------------------------|-------------------------------|-------|------|
-| 查看 Monitoring\* | [monitoring-ui-view](#monitoring-ui-view) | 2.4.8+ | 9.4.204+ |
+| 查看 Monitoring\* | [monitoring-ui-view](#具有-rancher-权限的用户) | 2.4.8+ | 9.4.204+ |
\* 如果某个用户绑定了 Rancher 的 **View Monitoring** 角色,该用户只有在有 UI 链接时才有权访问外部 Monitoring UI。要访问 Monitoring Pane 以获取这些链接,用户必须是至少一个项目的项目成员。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector.md
index 42a2e900c59..f14ae63cbb8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector.md
@@ -2,13 +2,13 @@
title: NeuVector 集成
---
-### Rancher 中的 NeuVector 集成
+## Rancher 中的 NeuVector 集成
-[NeuVector 5.x](https://open-docs.neuvector.com/) 是一个开源的,以容器为中心的安全应用程序,Rancher 已集成 NeuVector。NeuVector 在运行时为关键应用程序和数据提供实时的合规、可见和保护功能。NeuVector 提供具有 CIS Benchmark 和漏洞扫描的防火墙、容器进程/文件系统监控和安全审计。有关 Rancher 安全性的更多信息,请参阅[安全文档](../pages-for-subheaders/rancher-security.md)。
+[NeuVector 5.x](https://open-docs.neuvector.com/) 是一个开源的,以容器为中心的安全应用程序,Rancher 已集成 NeuVector。NeuVector 在运行时为关键应用程序和数据提供实时的合规、可见和保护功能。NeuVector 提供具有 CIS Benchmark 和漏洞扫描的防火墙、容器进程/文件系统监控和安全审计。有关 Rancher 安全性的更多信息,请参阅[安全文档](../reference-guides/rancher-security/rancher-security.md)。
NeuVector 可以通过 Helm Chart 启用。你可以在 **Apps** 或 Rancher UI 中的 **Cluster Tools** 中安装该 Chart。安装 Helm Chart 后,用户可以轻松地[在 Rancher 中部署和管理 NeuVector 集群](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace)。
-### 使用 Rancher 安装 NeuVector
+## 使用 Rancher 安装 NeuVector
Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可以在 Rancher 中直接跳转,然后部署和管理 NeuVector 集群。
@@ -40,12 +40,12 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 点击左侧导航栏底部的**集群工具**。
1. 按照上面的步骤 4 相应地选择你的容器运行时,然后再次单击**安装**。
-### 从 Rancher UI 访问 NeuVector
+## 从 Rancher UI 访问 NeuVector
1. 导航到安装了 NeuVector 的集群的 Cluster Explorer。在左侧导航栏中,单击 **NeuVector**。
1. 单击外部链接以转到 NeuVector UI。选择链接后,用户必须接受`最终用户许可协议`才能访问 NeuVector UI。
-### 从 Rancher UI 卸载 NeuVector
+## 从 Rancher UI 卸载 NeuVector
**通过 "Apps" 卸载**:
@@ -58,15 +58,15 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 点击 **☰ > 集群管理**。
1. 单击屏幕左下角的**集群工具**,然后单击 NeuVector Chart 下方的垃圾桶图标。如果需要,选择`删除与此应用关联的 CRD`,然后单击**删除**。
-### GitHub 仓库
+## GitHub 仓库
NeuVector 项目在[这里](https://github.com/neuvector/neuvector)。
-### 文档
+## 文档
NeuVector 文档在[这里](https://open-docs.neuvector.com/)。
-### 架构
+## 架构
NeuVector 安全解决方案包含四种类型的安全容器,分别是 Controller、Enforcer、Manager 和 Scanner。它还提供了一个称为 All-in-One 的特殊容器(主要用于 Docker 原生部署),能将 Controller、Enforcer 和 Manager 功能组合在一个容器中。此外,还有一个 Updater,运行该程序时会更新 CVE 数据库。
@@ -87,7 +87,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
要了解有关 NeuVector 架构的更多信息,请参阅[此处](https://open-docs.neuvector.com/basics/overview#architecture)。
-### CPU 和内存分配
+## CPU 和内存分配
以下是默认 NeuVector Chart 安装部署的最低计算资源推荐。请注意,未设置资源限制。
@@ -101,7 +101,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
\* Controller、Manager 和 Scanner 容器合计至少需要 1GB 内存。
-### 强化集群支持 - Calico 和 Canal
+## 强化集群支持 - Calico 和 Canal
@@ -158,7 +158,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
-### 启用 SELinux 的集群支持 - Calico 和 Canal
+## 启用 SELinux 的集群支持 - Calico 和 Canal
要在 RKE2 集群上启用 SELinux,请执行以下步骤:
@@ -175,12 +175,12 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{
kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}'
```
-### 离线环境中的集群支持
+## 离线环境中的集群支持
- 所有 NeuVector 组件都可部署在离线环境中的集群上,无需任何额外配置。
-### 支持限制
+## 支持限制
* 目前仅支持管理员和集群所有者。
@@ -189,7 +189,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
* Windows 集群不支持 NeuVector。
-### 其他限制
+## 其他限制
* 目前,如果 NeuVector partner Chart 已存在,则 NeuVector 功能 Chart 的安装会失败。要解决此问题,请卸载 NeuVector partner Chart 并重新安装 NeuVector 功能 Chart。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector/overview.md b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector/overview.md
index c6627aa62a0..79bb6f1760c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/integrations-in-rancher/neuvector/overview.md
@@ -6,13 +6,13 @@ title: 概述
-### Rancher 中的 NeuVector 集成
+## Rancher 中的 NeuVector 集成
[NeuVector 5.x](https://open-docs.neuvector.com/) 是一个开源的,以容器为中心的安全应用程序,Rancher 已集成 NeuVector。NeuVector 在运行时为关键应用程序和数据提供实时的合规、可见和保护功能。NeuVector 提供具有 CIS Benchmark 和漏洞扫描的防火墙、容器进程/文件系统监控和安全审计。有关 Rancher 安全性的更多信息,请参阅[安全文档](../../reference-guides/rancher-security)。
NeuVector 可以通过 Helm Chart 启用。你可以在 **Apps** 或 Rancher UI 中的 **Cluster Tools** 中安装该 Chart。安装 Helm Chart 后,用户可以轻松地[在 Rancher 中部署和管理 NeuVector 集群](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace)。
-### 使用 Rancher 安装 NeuVector
+## 使用 Rancher 安装 NeuVector
Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可以在 Rancher 中直接跳转,然后部署和管理 NeuVector 集群。
@@ -44,12 +44,12 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 点击左侧导航栏底部的**集群工具**。
1. 按照上面的步骤 4 相应地选择你的容器运行时,然后再次单击**安装**。
-### 从 Rancher UI 访问 NeuVector
+## 从 Rancher UI 访问 NeuVector
1. 导航到安装了 NeuVector 的集群的 Cluster Explorer。在左侧导航栏中,单击 **NeuVector**。
1. 单击外部链接以转到 NeuVector UI。选择链接后,用户必须接受`最终用户许可协议`才能访问 NeuVector UI。
-### 从 Rancher UI 卸载 NeuVector
+## 从 Rancher UI 卸载 NeuVector
**通过 Apps 卸载:**
@@ -62,15 +62,15 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 单击 **☰ > 集群管理**。
1. 单击屏幕左下角的**集群工具**,然后单击 NeuVector Chart 下方的垃圾桶图标。如果需要,选择`删除与此应用关联的 CRD`,然后单击**删除**。
-### GitHub 仓库
+## GitHub 仓库
NeuVector 项目在[这里](https://github.com/neuvector/neuvector)。
-### 文档
+## 文档
NeuVector 文档在[这里](https://open-docs.neuvector.com/)。
-### 架构
+## 架构
NeuVector 安全解决方案包含四种类型的安全容器,分别是 Controller、Enforcer、Manager 和 Scanner。它还提供了一个称为 All-in-One 的特殊容器(主要用于 Docker 原生部署),能将 Controller、Enforcer 和 Manager 功能组合在一个容器中。此外,还有一个 Updater,运行该程序时会更新 CVE 数据库。
@@ -91,7 +91,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
要了解有关 NeuVector 架构的更多信息,请参阅[此处](https://open-docs.neuvector.com/basics/overview#architecture)。
-### CPU 和内存分配
+## CPU 和内存分配
以下是默认 NeuVector Chart 安装部署的最低计算资源推荐。请注意,未设置资源限制。
@@ -104,7 +104,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
\* Controller、Manager 和 Scanner 容器合计至少需要 1GB 内存。
-### 强化集群支持 - Calico 和 Canal
+## 强化集群支持 - Calico 和 Canal
@@ -159,7 +159,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
-### 启用 SELinux 的集群支持 - Calico 和 Canal
+## 启用 SELinux 的集群支持 - Calico 和 Canal
要在 RKE2 集群上启用 SELinux,请执行以下步骤:
@@ -175,11 +175,11 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{
kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}'
```
-### 离线环境中的集群支持
+## 离线环境中的集群支持
- 所有 NeuVector 组件都可部署在离线环境中的集群上,无需任何额外配置。
-### 支持限制
+## 支持限制
- 目前仅支持管理员和集群所有者。
@@ -187,7 +187,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
- Windows 集群不支持 NeuVector。
-### 其他限制
+## 其他限制
- 目前,如果 NeuVector partner Chart 已存在,则 NeuVector 功能 Chart 的安装会失败。要解决此问题,请卸载 NeuVector partner Chart 并重新安装 NeuVector 功能 Chart。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md
deleted file mode 100644
index 40e099ac09a..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/deploy-apps-across-clusters.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: 跨集群部署应用
----
-
-
-Rancher 2.5 引入了 Fleet,这是一种跨集群部署应用的新方式。
-
-使用 Fleet 的持续交付是大规模的 GitOps。如需更多信息,请参阅 [Fleet](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md)。
-
-### 多集群应用
-
-在 2.5 之前的 Rancher 版本中,多集群应用功能用于跨集群部署应用。我们已弃用多集群应用功能,但你仍然可以在 Rancher 2.5 中使用该功能。
-
-详情请参阅[此文档](../how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md
index c48a1d04995..f7c470b45c9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md
@@ -4,7 +4,7 @@ title: 监控最佳实践
配置合理的监控和告警规则对于安全、可靠地运行生产环境中的工作负载至关重要。在使用 Kubernetes 和 Rancher 时也是如此。幸运的是,你可以使用集成的监控和告警功能来简化整个过程。
-[Rancher 监控文档](../../../pages-for-subheaders/monitoring-and-alerting.md)描述了如何设置完整的 Prometheus 和 Grafana。这是开箱即用的功能,它将从集群中的所有系统和 Kubernetes 组件中抓取监控数据,并提供合理的仪表板和告警。但为了实现可靠的设置,你还需要监控你的工作负载并使 Prometheus 和 Grafana 适应你的特定用例和集群规模。本文档将为你提供这方面的最佳实践。
+[Rancher 监控文档](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)描述了如何设置完整的 Prometheus 和 Grafana。这是开箱即用的功能,它将从集群中的所有系统和 Kubernetes 组件中抓取监控数据,并提供合理的仪表板和告警。但为了实现可靠的设置,你还需要监控你的工作负载并使 Prometheus 和 Grafana 适应你的特定用例和集群规模。本文档将为你提供这方面的最佳实践。
## 监控内容
@@ -86,7 +86,7 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。
如果你有一个(微)服务架构,在该架构中集群的多个单独的工作负载相互通信,那么拥有这些流量的详细指标和跟踪是非常重要的,因为这可以帮助你了解所有这些工作负载之间的通信方式,以及问题或瓶颈可能出现的地方。
-当然,你可以监控所有工作负载中的所有内部流量,并将这些指标暴露给 Prometheus,但这相当耗费精力。像 Istio 这样的服务网格(可以通过[单击](../../../pages-for-subheaders/istio.md)在 Rancher 中安装)可以自动完成这项工作,并提供所有 Service 之间流量的丰富的遥测数据。
+当然,你可以监控所有工作负载中的所有内部流量,并将这些指标暴露给 Prometheus,但这相当耗费精力。像 Istio 这样的服务网格(可以通过[单击](../../../integrations-in-rancher/istio/istio.md)在 Rancher 中安装)可以自动完成这项工作,并提供所有 Service 之间流量的丰富的遥测数据。
## 真实用户监控
@@ -94,7 +94,7 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。
## 安全监控
-除了通过监控工作负载来检测性能、可用性或可扩展性之外,你还应该监控集群和运行在集群中的工作负载,来发现潜在的安全问题。一个好的做法是经常运行 [CIS 扫描](../../../pages-for-subheaders/cis-scan-guides.md)并发出告警,来检查集群是否按照安全最佳实践进行配置。
+除了通过监控工作负载来检测性能、可用性或可扩展性之外,你还应该监控集群和运行在集群中的工作负载,来发现潜在的安全问题。一个好的做法是经常运行 [CIS 扫描](../../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)并发出告警,来检查集群是否按照安全最佳实践进行配置。
对于工作负载,你可以查看 Kubernetes 和 Container 安全解决方案,例如 [NeuVector](https://www.suse.com/products/neuvector/)、[Falco](https://falco.org/)、[Aqua Kubernetes Security](https://www.aquasec.com/solutions/kubernetes-container-security/) 和 [SysDig](https://sysdig.com/)。
@@ -108,4 +108,4 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。
如果告警开始发送,但你暂时无法处理,你也可以将告警静默一定时间,以便以后查看。
-如果需要了解更多关于如何设置告警和通知通道的信息,请访问 [Rancher 文档中心](../../../pages-for-subheaders/monitoring-and-alerting.md)。
+如果需要了解更多关于如何设置告警和通知通道的信息,请访问 [Rancher 文档中心](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md
index e12f1db2498..9cd0bd1ca15 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md
@@ -6,18 +6,18 @@ title: Rancher 管理集群的最佳实践
-### Logging
+## Logging
有关集群级别日志和应用日志的建议,请参见 [Logging 最佳实践](logging-best-practices.md)。
-### Monitoring
+## Monitoring
配置合理的监控和告警规则对于安全、可靠地运行生产环境中的工作负载至关重要。有关更多建议,请参阅[最佳实践](monitoring-best-practices.md)。
-### 设置容器的技巧
+## 设置容器的技巧
配置良好的容器可以极大地提高环境的整体性能和安全性。有关容器设置的建议,请参见[设置容器的技巧](tips-to-set-up-containers.md)。
-### Rancher 管理 vSphere 集群的最佳实践
+## Rancher 管理 vSphere 集群的最佳实践
[Rancher 管理 vSphere 集群的最佳实践](rancher-managed-clusters-in-vsphere.md)概述了在 vSphere 环境中配置下游 Rancher 集群的参考架构,以及 VMware 记录的标准 vSphere 最佳实践。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md
index d48c0cf2af2..d368853d2e5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md
@@ -8,7 +8,7 @@ title: 设置容器的技巧
如果你需要了解容器安全的详细信息,也可以参见 Rancher 的[容器安全指南](https://rancher.com/complete-guide-container-security)。
-### 使用通用容器操作系统
+## 使用通用容器操作系统
在可能的情况下,你应该尽量在通用的容器基础操作系统上进行标准化。
@@ -16,17 +16,20 @@ Alpine 和 BusyBox 等较小的发行版减少了容器镜像的大小,并且
流行的发行版如 Ubuntu、Fedora 和 CentOS 等都经过了大量的测试,并提供了更多的功能。
-### 使用 From scratch 容器
+## 使用 From scratch 容器
+
如果你的微服务是一个独立的静态二进制,你应该使用 `From scratch` 容器。
`FROM scratch` 容器是一个[官方 Docker 镜像](https://hub.docker.com/_/scratch),它是空的,这样你就可以用它来设计最小的镜像。
这个镜像这将具有最小的攻击层和最小的镜像大小。
-### 以非特权方式运行容器进程
+## 以非特权方式运行容器进程
+
在可能的情况下,在容器内运行进程时使用非特权用户。虽然容器运行时提供了隔离,但仍然可能存在漏洞和攻击。如果容器以 root 身份运行,无意中或意外的主机挂载也会受到影响。有关为 Pod 或容器配置安全上下文的详细信息,请参见 [Kubernetes 文档](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)。
-### 定义资源限制
+## 定义资源限制
+
你应该将 CPU 和内存限制应用到你的 Pod 上。这可以帮助管理 worker 节点上的资源,并避免发生故障的微服务影响其他微服务。
在标准 Kubernetes 中,你可以设置命名空间级别的资源限制。在 Rancher 中,你可以设置项目级别的资源限制,项目内的所有命名空间都会继承这些限制。详情请参见 Rancher 官方文档。
@@ -35,7 +38,8 @@ Alpine 和 BusyBox 等较小的发行版减少了容器镜像的大小,并且
有关如何在[容器级别](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container)和命名空间级别设置资源限制的更多信息,请参见 Kubernetes 文档。
-### 定义资源需求
+## 定义资源需求
+
你应该将 CPU 和内存要求应用到你的 Pod 上。这对于通知调度器需要将你的 pod 放置在哪种类型的计算节点上,并确保它不会过度配置该节点资源至关重要。在 Kubernetes 中,你可以通过在 pod 的容器规范的资源请求字段中定义 `resources.requests` 来设置资源需求。详情请参见 [Kubernetes 文档](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container)。
:::note
@@ -46,7 +50,8 @@ Alpine 和 BusyBox 等较小的发行版减少了容器镜像的大小,并且
建议在容器级别上定义资源需求,否则,调度器会认为集群加载对你的应用没有帮助。
-### 配置存活和就绪探测器
+## 配置存活和就绪探测器
+
你可以为你的容器配置存活探测器和就绪探测器。如果你的容器不是完全崩溃,Kubernetes 是不会知道它是不健康的,除非你创建一个可以报告容器状态的端点或机制。或者,确保你的容器在不健康的情况下停止并崩溃。
Kubernetes 文档展示了如何[为容器配置存活和就绪探测器](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md
index 8ffe650d009..efdfde73d91 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md
@@ -39,7 +39,7 @@ title: 在 vSphere 环境中安装 Rancher
### 根据 Rancher 文档确定虚拟机的大小
-请参阅[安装要求](../../../pages-for-subheaders/installation-requirements.md)。
+请参阅[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。
### 利用虚拟机模板来构建环境
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/rancher-server.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/rancher-server.md
index d297b395a0a..00202488669 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/rancher-server.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/rancher-server.md
@@ -8,14 +8,14 @@ title: Rancher Server 的最佳实践
本指南介绍了让 Rancher 管理下游 Kubernetes 集群的 Rancher Server 运行建议。
-### 推荐的架构和基础设施
+## 推荐的架构和基础设施
有关在高可用 Kubernetes 集群上设置 Rancher Server 的通用建议,请参见[本指南](tips-for-running-rancher.md)。
-### 部署策略
+## 部署策略
[本指南](rancher-deployment-strategy.md)旨在帮助你选择部署策略(区域部署/中心辐射型部署),来让 Rancher Server 更好地管理下游 Kubernetes 集群。
-### 在 vSphere 环境中安装 Rancher
+## 在 vSphere 环境中安装 Rancher
[本指南](on-premises-rancher-in-vsphere.md)介绍了在 vSphere 环境中安装 Rancher 的参考架构,以及 VMware 记录的标准 vSphere 最佳实践。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
index 0c47b75fa06..df057a54983 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
@@ -10,27 +10,32 @@ title: Rancher 运行技巧
在设置高可用 Rancher 安装时,请考虑以下事项。
-### 在单独的集群上运行 Rancher
+## 在单独的集群上运行 Rancher
+
不要在安装了 Rancher 的 Kubernetes 集群上运行其他工作负载或微服务。
-### 确保 Kubernetes 节点配置正确
-在部署节点时,请遵循 K8s 和 etcd 的最佳实践,其中包括禁用 swap,检查集群中的所有主机之间是否有良好的网络连接,为每个节点使用唯一的主机名、MAC 地址和 `product_uuids`,检查所需端口是否已经打开,并使用配置 SSD 的 etcd 进行部署。详情请参见 [kubernetes 官方文档](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin)和 [etcd 性能操作指南](https://etcd.io/docs/v3.4/op-guide/performance/)。
+## 确保 Kubernetes 节点配置正确
+
+在部署节点时,请遵循 K8s 和 etcd 的最佳实践,其中包括禁用 swap,检查集群中的所有主机之间是否有良好的网络连接,为每个节点使用唯一的主机名、MAC 地址和 `product_uuids`,检查所需端口是否已经打开,并使用配置 SSD 的 etcd 进行部署。详情请参见 [Kubernetes 官方文档](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin)和 [etcd 性能操作指南](https://etcd.io/docs/v3.5/op-guide/performance/)。
+
+## 使用 RKE 时:备份状态文件(Statefile)
-### 使用 RKE 时:备份状态文件(Statefile)
RKE 将集群状态记录在一个名为 `cluster.rkestate` 的文件中,该文件对集群的恢复和/或通过 RKE 维护集群非常重要。由于这个文件包含证书材料,我们强烈建议在备份前对该文件进行加密。请在每次运行 `rke up` 后备份状态文件。
-### 在同一个数据中心运行集群中的所有节点
+## 在同一个数据中心运行集群中的所有节点
+
为达到最佳性能,请在同一地理数据中心运行所有三个节点。如果你在云(如 AWS)上运行节点,请在不同的可用区(AZ)中运行这三个节点。例如,在 us-west-2a 中运行节点 1,在 us-west-2b 中运行节点 2,在 us-west-2c 中运行节点 3。
-### 保证开发和生产环境的相似性
+## 保证开发和生产环境的相似性
+
强烈建议为运行 Rancher 的 Kubernetes 集群配备 “staging” 或 “pre-production” 环境。这个环境的软件和硬件配置应该尽可能接近你的生产环境。
-### 监控集群以规划容量
-Rancher Server 的 Kubernetes 集群应该尽可能满足[系统和硬件要求](../../../pages-for-subheaders/installation-requirements.md)。越偏离系统和硬件要求,你可能面临的风险就越大。
+## 监控集群以规划容量
+
+Rancher Server 的 Kubernetes 集群应该尽可能满足[系统和硬件要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。越偏离系统和硬件要求,你可能面临的风险就越大。
但是,已发布的要求已经考虑了各种工作负载类型,因此,基于指标来规划容量应该是扩展 Rancher 的最佳实践。
你可以将 Rancher 集成业界领先的开源监控解决方案 Prometheus 以及能可视化 Prometheus 指标的 Grafana,来监控集群节点、Kubernetes 组件和软件部署的状态和过程。
-在集群中[启用监控](../../../pages-for-subheaders/monitoring-and-alerting.md)后,你可以通过设置告警通知,来了解集群容量的使用情况。你还可以使用 Prometheus 和 Grafana 监控框架,在你扩容时建立关键指标的基线。
-
+在集群中[启用监控](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)后,你可以通过设置告警通知,来了解集群容量的使用情况。你还可以使用 Prometheus 和 Grafana 监控框架,在你扩容时建立关键指标的基线。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
index 8d5fc725d36..8e98f63729f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
@@ -56,6 +56,6 @@ Rancher 的大部分逻辑都发生在事件处理程序上。每当更新对象
与 Rancher 版本类似,我们建议让你的 kubernetes 集群保持使用最新版本。这将确保你的集群能包含可用的性能增强或错误修复。
### 优化 ETCD
-[ETCD 性能](https://etcd.io/docs/v3.4/op-guide/performance/)的两个主要瓶颈是磁盘速度和网络速度。对任何一个进行优化都应该能提高性能。有关 ETCD 性能的信息,请参阅 [etcd 性能慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装调优 etcd](https://docs.ranchermanager.rancher.io/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs)。有关磁盘的信息,你也可以参阅[我们的文档](https://docs.Ranchermanager.Rancher.io/v2.5/pages-for-subheaders/installation-requirements#disks)。
+[ETCD 性能](https://etcd.io/docs/v3.5/op-guide/performance/)的两个主要瓶颈是磁盘速度和网络速度。对任何一个进行优化都应该能提高性能。有关 ETCD 性能的信息,请参阅 [etcd 性能慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装调优 etcd](https://docs.ranchermanager.rancher.io/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs)。有关磁盘的信息,你也可以参阅[我们的文档](https://docs.ranchermanager.rancher.io/v2.5/pages-for-subheaders/installation-requirements#disks)。
理论上,ETCD 集群中的节点越多,由于复制要求 [source](https://etcd.io/docs/v3.3/faq),它就会越慢。这可能与常见的缩放方法相悖。我们还可以推断,ETCD 的性能将受到节点间距离的反面影响,因为这将减慢网络通信。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
index 552e79ec76b..154b01c5ad4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
@@ -110,7 +110,7 @@ Rancher 的大部分逻辑发生在 Event Handler 上。每当资源对象产生
Etcd 是 Kubernetes 和 Rancher 的后端数据库,在 Rancher 性能中扮演重要的角色。
-[Etcd 性能](https://etcd.io/docs/v3.4/op-guide/performance/)的两个主要瓶颈是磁盘和网络速度。Etcd 应当在具有高速网络和高读写速度 (IOPS) SSD 硬盘的专用节点上运行。有关 etcd 性能的更多信息,请参阅 [etcd 性能缓慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装进行 etcd 调优](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)。有关磁盘的信息可以在[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#磁盘)中找到。
+[Etcd 性能](https://etcd.io/docs/v3.5/op-guide/performance/)的两个主要瓶颈是磁盘和网络速度。Etcd 应当在具有高速网络和高读写速度 (IOPS) SSD 硬盘的专用节点上运行。有关 etcd 性能的更多信息,请参阅 [etcd 性能缓慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装进行 etcd 调优](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)。有关磁盘的信息可以在[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#磁盘)中找到。
根据 etcd 的[复制机制](https://etcd.io/docs/v3.5/faq/#what-is-maximum-cluster-size),建议在三个节点上运行 etcd,运行在更多的节点上反而会降低速度。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/kubectl-utility.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/kubectl-utility.md
index 62377d846c6..28315bf813d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/kubectl-utility.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/kubectl-utility.md
@@ -18,7 +18,7 @@ kubectl 用于与 Rancher 进行交互。
_要求_
-如果管理员[关闭了 kubeconfig 令牌生成](../about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),当你运行 `kubectl` 时,kubeconfig 文件需要 [Rancher CLI](./rancher-cli.md) 存在于你的 PATH 中。否则,你会看到这样的错误信息:
+如果管理员[关闭了 kubeconfig 令牌生成](../../api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),当你运行 `kubectl` 时,kubeconfig 文件需要 [Rancher CLI](./rancher-cli.md) 存在于你的 PATH 中。否则,你会看到这样的错误信息:
`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`。
该功能可以让 kubectl 与 Rancher Server 进行身份验证,并在需要时获得新的 kubeconfig token。目前支持以下验证提供程序:
@@ -29,4 +29,4 @@ _要求_
4. OpenLDAP
5. SAML 身份提供商:Ping,Okta,ADFS,Keycloak 和 Shibboleth
-如果你是第一次运行 kubectl(例如,`kubectl get pods`),它会要求你选择一个验证提供程序并使用 Rancher Server 登录。kubeconfig token 会被缓存到 `./.cache/token` 下你运行 kubectl 的路径中。该 Token 在[过期](../about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌)或[从 Rancher Server 删除](../about-the-api/api-tokens.md#删除令牌)之前都是有效的。过期后,下一个 `kubectl get pods` 命令会要求你再次使用 Rancher Server 登录。
\ No newline at end of file
+如果你是第一次运行 kubectl(例如,`kubectl get pods`),它会要求你选择一个验证提供程序并使用 Rancher Server 登录。kubeconfig token 会被缓存到 `./.cache/token` 下你运行 kubectl 的路径中。该 Token 在[过期](../../api/api-tokens.md#在生成的-kubeconfig-中禁用令牌)或[从 Rancher Server 删除](../../api/api-tokens.md#删除令牌)之前都是有效的。过期后,下一个 `kubectl get pods` 命令会要求你再次使用 Rancher Server 登录。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md
index 8e85d8dfe22..c030a973fe8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cli-with-rancher/rancher-cli.md
@@ -9,7 +9,7 @@ description: Rancher CLI 是一个命令行工具,用于在工作站中与 Ran
Rancher CLI(命令行界面)是一个命令行工具,可用于与 Rancher 进行交互。使用此工具,你可以使用命令行而不用通过 GUI 来操作 Rancher。
-### 下载 Rancher CLI
+## 下载 Rancher CLI
你可以直接 UI 下载二进制文件。
@@ -17,14 +17,14 @@ Rancher CLI(命令行界面)是一个命令行工具,可用于与 Rancher
1. 在导航侧边栏菜单底部,单击**简介**。
1. 在 **CLI 下载**中,有 Windows、Mac 和 Linux 的二进制文件下载链接。你还可以访问我们的 CLI [发布页面](https://github.com/rancher/cli/releases)直接下载二进制文件。
-### 要求
+## 要求
下载 Rancher CLI 后,你需要进行一些配置。Rancher CLI 需要:
- 你的 Rancher Server URL,用于连接到 Rancher Server。
- API 持有者令牌(Bearer Token),用于向 Rancher 进行身份验证。有关获取持有者令牌的更多信息,请参阅[创建 API 密钥](../user-settings/api-keys.md)。
-### CLI 身份验证
+## CLI 身份验证
在使用 Rancher CLI 控制你的 Rancher Server 之前,你必须使用 API 持有者令牌进行身份验证。运行以下命令进行登录(将 `` 和 `` 替换为你的实际信息):
@@ -34,7 +34,7 @@ $ ./rancher login https:// --token
如果 Rancher Server 使用自签名证书,Rancher CLI 会提示你继续连接。
-### 项目选择
+## 项目选择
在执行命令之前,你必须先选择一个 Rancher 项目来执行这些命令。要选择[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md),请运行 `./rancher context switch` 命令。输入此命令后,会显示可用项目的列表。输入一个数字以选择项目。
@@ -58,34 +58,34 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json
请确保你可以成功运行 `rancher kubectl get pods`。
-### 命令
+## 命令
以下命令可用于 Rancher CLI:
| 命令 | 结果 |
|---|---|
| `apps, [app]` | 对商店应用(即单个 [Helm Chart](https://docs.helm.sh/developing_charts/))或 Rancher Chart 执行操作。 |
-| `catalog` | 对[应用商店](../../pages-for-subheaders/helm-charts-in-rancher.md)执行操作。 |
-| `clusters, [cluster]` | 对[集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)执行操作。 |
+| `catalog` | 对[应用商店](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md)执行操作。 |
+| `clusters, [cluster]` | 对[集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)执行操作。 |
| `context` | 在 Rancher [项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)之间切换。有关示例,请参阅[项目选择](#项目选择)。 |
-| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | 显示 [Kubernetes 资源](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types)或 Rancher 资源(即[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)和[工作负载](../../pages-for-subheaders/workloads-and-pods.md))的详细信息。按名称或 ID 指定资源。 |
+| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | 显示 [Kubernetes 资源](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types)或 Rancher 资源(即[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)和[工作负载](../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md))的详细信息。按名称或 ID 指定资源。 |
| `kubectl` | 运行 [kubectl 命令](https://kubernetes.io/docs/reference/kubectl/overview/#operations)。 |
| `login, [l]` | 登录 Rancher Server。有关示例,请参阅 [CLI 身份验证](#cli-身份验证)。 |
| `namespaces, [namespace]` | 执行命名空间操作。 |
| `nodes, [node]` | 执行节点空间操作。 |
| `projects, [project]` | 执行[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)操作。 |
-| `ps` | 显示项目中的[工作负载](../../pages-for-subheaders/workloads-and-pods.md)。 |
+| `ps` | 显示项目中的[工作负载](../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)。 |
| `settings, [setting]` | 显示 Rancher Server 的当前设置。 |
| `ssh` | 使用 SSH 协议连接到你的某个集群节点。 |
| `help, [h]` | 显示命令列表或某个命令的帮助。 |
-### Rancher CLI 帮助
+## Rancher CLI 帮助
使用 CLI 登录 Rancher Server 后,输入 `./rancher --help` 以获取命令列表。
所有命令都支持 `--help` 标志,该标志解释了每个命令的用法。
-### 限制
+## 限制
-Rancher CLI **不能**用于安装[仪表板应用程序或 Rancher 功能 Chart](../../pages-for-subheaders/helm-charts-in-rancher.md)。
+Rancher CLI **不能**用于安装[仪表板应用程序或 Rancher 功能 Chart](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/cluster-configuration.md
index 111aa5afbf6..21f3b31f422 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/cluster-configuration.md
@@ -8,9 +8,9 @@ title: 集群配置
使用 Rancher 配置 Kubernetes 集群后,你仍然可以编辑集群的选项和设置。
-有关编辑集群成员资格的信息,请转至[此页面](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md)。
+有关编辑集群成员资格的信息,请转至[此页面](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md)。
-### 集群配置参考
+## 集群配置参考
集群配置选项取决于 Kubernetes 集群的类型:
@@ -21,7 +21,7 @@ title: 集群配置
- [GKE 集群配置](rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)
- [AKS 集群配置](rancher-server-configuration/aks-cluster-configuration.md)
-### 不同类型集群的管理功能
+## 不同类型集群的管理功能
对于已有集群而言,可提供的选项和设置取决于你配置集群的方法。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md
index 98e6b500737..656430783c5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md
@@ -4,6 +4,8 @@ title: EC2 主机配置参考
有关 EC2 和节点的更多详细信息,请参阅 [EC2 管理控制台](https://aws.amazon.com/ec2)的官方文档。
+## Machine Pools
+
### 区域
构建集群的地理[区域](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md
index 6ea2549ccf8..6abf01e5e2e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md
@@ -4,6 +4,8 @@ title: Azure 主机配置
有关 Azure 的更多信息,请参阅官方 [Azure 文档](https://docs.microsoft.com/en-us/azure/?product=featured)。
+## Machine Pools
+
### 环境
Microsoft 提供了多个[云](https://docs.microsoft.com/en-us/cli/azure/cloud?view=azure-cli-latest)来满足地区法律的要求:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md
index 6fdddebab84..16cb5d9f12f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md
@@ -4,6 +4,8 @@ title: DigitalOcean 主机配置
有关 DigitalOcean、Droplet 的更多详细信息,请参阅[官方文档](https://docs.digitalocean.com/products/compute/)。
+## Machine Pools
+
### 区域
配置创建 Droplet 的[区域](https://docs.digitalocean.com/glossary/region/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md
index 56082ed497a..5f222f19325 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md
@@ -4,11 +4,11 @@ title: EC2 节点模板配置
有关 EC2 和节点的更多详细信息,请参阅 [EC2 管理控制台](https://aws.amazon.com/ec2)的官方文档。
-### 区域
+## 区域
在**区域**字段中,选择创建云凭证时使用的同一区域。
-### 云凭证
+## 云凭证
你的 AWS 账户访问信息,存储在[云凭证](../../../user-settings/manage-cloud-credentials.md)中。
@@ -21,14 +21,14 @@ title: EC2 节点模板配置
参阅下面的三个示例 JSON 策略:
- [IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#iam-策略示例)
-- [带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
+- [带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
- [允许用户加密 EBS 卷的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#允许加密-ebs-卷的-iam-策略示例)
-### 验证和配置节点
+## 验证和配置节点
为集群选择可用区和网络设置。
-### 安全组
+## 安全组
选择默认安全组或配置安全组。
@@ -36,12 +36,12 @@ title: EC2 节点模板配置
如果你自行为 EC2 实例提供安全组,Rancher 不会对其进行修改。因此,你需要让你的安全组允许 [Rancher 配置实例所需的端口](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rke-上-rancher-server-节点的端口)。有关使用安全组控制 EC2 实例的入站和出站流量的更多信息,请参阅[这里](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#WorkingWithSecurityGroups)。
-### 实例选项
+## 实例选项
配置要创建的实例。确保为 AMI 配置正确的 **SSH 用户**。所选的区域可能不支持默认实例类型。在这种情况下,你必须选择一个确实存在的实例类型。否则将出现错误,表示请求的配置不受支持。
-如果需要传递 **IAM 示例配置名称**(不是 ARN),例如要使用 [Kubernetes 云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md)时,策略则需要其他权限。有关示例策略,请参阅[带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)。
+如果需要传递 **IAM 示例配置名称**(不是 ARN),例如要使用 [Kubernetes 云提供商](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)时,策略则需要其他权限。有关示例策略,请参阅[带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)。
-### 引擎选项
+## 引擎选项
在节点模板的**引擎选项**中,你可以配置容器 daemon。你可能需要指定容器版本或容器镜像仓库 Mirror。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
index 4c00d381982..141c2973e0f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
@@ -4,11 +4,11 @@ title: DigitalOcean 节点模板配置
账户访问信息存储在云凭证中。云凭证存储在 Kubernetes 密文中。多个节点模板可以使用相同的云凭证。你可以使用现有的云凭证或创建新的凭证。
-### Droplet 选项
+## Droplet 选项
**Droplet 选项**用于配置集群的地理区域和规范。
-### Docker Daemon
+## Docker Daemon
如果你使用 Docker,[Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) 配置选项包括:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
index a296187be6b..d617a0d6497 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
@@ -13,7 +13,7 @@ title: AKS 集群配置参考
在 Rancher UI 中配置 AKS 集群时,无法禁用 RBAC。如果在 AKS 中为集群禁用了 RBAC,则无法在 Rancher 中注册或导入集群。
-Rancher 可以使用与其他集群一样的方式为 AKS 集群配置成员角色。有关详细信息,请参阅 [RBAC](../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
+Rancher 可以使用与其他集群一样的方式为 AKS 集群配置成员角色。有关详细信息,请参阅 [RBAC](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
## 云凭证
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md
index 5de68eb41ae..be8365d0724 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md
@@ -2,7 +2,7 @@
title: EKS 集群配置参考
---
-### 账号访问
+## 账号访问
使用获取的信息为 IAM 策略填写每个下拉列表和字段:
@@ -11,7 +11,7 @@ title: EKS 集群配置参考
| 区域 | 从下拉列表中选择构建集群的地理区域。 |
| 云凭证 | 选择为 IAM 策略创建的云凭证。有关在 Rancher 中创建云凭证的更多信息,请参阅[此页面](../../user-settings/manage-cloud-credentials.md)。 |
-### 服务角色
+## 服务角色
选择一个[服务角色](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)。
@@ -20,15 +20,15 @@ title: EKS 集群配置参考
| Standard:Rancher 生成的服务角色 | 如果选择此角色,Rancher 会自动添加一个服务角色以供集群使用。 |
| 自定义:从现有的服务角色中选择 | 如果选择此角色,Rancher 将允许你从已在 AWS 中创建的服务角色中进行选择。有关在 AWS 中创建自定义服务角色的更多信息,请参阅 [Amazon 文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role)。 |
-### 密文加密
+## 密文加密
可选:要加密密文,请选择或输入在 [AWS 密钥管理服务 (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) 中创建的密钥。
-### API Server 端点访问
+## API Server 端点访问
配置公共/私有 API 访问是一个高级用例。有关详细信息,请参阅 [EKS 集群端点访问控制文档](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)。
-### 专用 API 端点
+## 专用 API 端点
如果你在创建集群时启用了私有 API 端点访问,并禁用了公共 API 端点访问,那么你必须进行额外的步骤才能使 Rancher 成功连接到集群。在这种情况下,一个弹窗将会显示,其中包含需要在要注册到 Rancher 的集群上运行的命令。配置集群后,你可以在任何能连接到集群的 Kubernetes API 的地方运行显示的命令。
@@ -36,7 +36,7 @@ title: EKS 集群配置参考
- 在创建集群时,创建具有私有和公共 API 端点访问权限的集群。在集群创建并处于 active 状态后,你可以禁用公共访问,Rancher 将能继续与 EKS 集群通信。
- 确保 Rancher 与 EKS 集群共享同一个子网。然后,你可以使用安全组使 Rancher 能够与集群的 API 端点进行通信。在这种情况下,你不需要运行注册集群的命令,Rancher 就能够与你的集群通信。有关配置安全组的更多信息,请参阅[安全组文档](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html)。
-### 公共访问端点
+## 公共访问端点
你也可以选择通过显式 CIDR 块来限制对公共端点的访问。
@@ -48,7 +48,7 @@ title: EKS 集群配置参考
有关对集群端点的公共和私有访问的更多信息,请参阅 [Amazon EKS 文档](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)。
-### 子网
+## 子网
| 选项 | 描述 |
| ------- | ------------ |
@@ -60,7 +60,7 @@ title: EKS 集群配置参考
- [什么是 Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html)
- [VPC 和子网](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
-### 安全组
+## 安全组
Amazon 文档:
@@ -68,7 +68,7 @@ Amazon 文档:
- [VPC 的安全组](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html)
- [创建安全组](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group)
-### Logging
+## Logging
将 controlplane 日志配置为发送到 Amazon CloudWatch。如果你将集群日志发送到 CloudWatch Logs,你需要按照 standard CloudWatch Logs 支付数据引入和存储费用。
@@ -76,13 +76,13 @@ Amazon 文档:
有关 EKS controlplane 日志管理的更多信息,请参阅[官方文档](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)。
-### 托管节点组
+## 托管节点组
Amazon EKS 托管的节点组自动为 Amazon EKS Kubernetes 集群的节点(Amazon EC2 实例)进行预置和生命周期管理。
有关节点组如何工作以及如何配置的更多信息,请参阅 [EKS 文档](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)。
-#### 使用你自己的启动模板
+### 使用你自己的启动模板
你可以提供启动模板 ID 和版本,以便轻松配置节点组中的 EC2 实例。如果你提供了启动模板,则以下设置都无法在 Rancher 中进行配置。因此,如果你使用启动模板,则需要在启动模板中指定以下列表中的所有必须和所需的设置。另请注意,如果提供了启动模板 ID 和版本,则只能更新模板版本。如果要使用新模板 ID,则需要创建新的托管节点组。
@@ -95,11 +95,11 @@ Amazon EKS 托管的节点组自动为 Amazon EKS Kubernetes 集群的节点(A
| 用户数据 | [MIME 多部分格式](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)的 Cloud init 脚本。 | 选填 |
| 实例资源标签 | 标记节点组中的每个 EC2 实例。 | 选填 |
-#### Rancher 管理的启动模板
+### Rancher 管理的启动模板
如果你不指定启动模板,你将能够在 Rancher UI 中配置上述选项,并且可以在创建后更新所有这些选项。为了利用所有这些选项,Rancher 将为你创建和管理启动模板。Rancher 中的所有集群都将有一个 Rancher 管理的启动模板。此外,每个没有指定启动模板的托管节点组都将具有一个管理的启动模板版本。此启动模板的名称将具有 “rancher-managed-lt-” 前缀,后面是集群的显示名称。此外,Rancher 管理的启动模板将使用 “rancher-managed-template” 键和 “do-not-modify-or-delete” 值来进行标记,以将其识别为 Rancher 管理的启动模板。请注意,不要修改或删除此启动模板,或将此启动模板与其他集群或托管节点组一起使用。因为这可能会使你的节点组“降级”并需要销毁和重新创建。
-#### 自定义 AMI
+### 自定义 AMI
如果你在启动模板或 Rancher 中指定了自定义 AMI,则必须[正确配置](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/)镜像,并且必须提供用户数据以[引导节点](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami)。这是一个高级用例,因此你必须要了解其要求。
@@ -111,7 +111,7 @@ Amazon EKS 托管的节点组自动为 Amazon EKS Kubernetes 集群的节点(A
:::
-#### Spot 实例
+### Spot 实例
Spot 实例现在[受 EKS 支持](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot)。如果你指定了启动模板,Amazon 建议不要在模板中提供实例类型。相反,Amazon 建议提供多种实例类型。如果你为节点组启用了“请求 Spot 实例”复选框,那么你将有机会提供多种实例类型。
@@ -121,7 +121,7 @@ Spot 实例现在[受 EKS 支持](https://docs.aws.amazon.com/eks/latest/usergui
:::
-#### 节点组设置
+### 节点组设置
以下设置也是可配置的。在创建节点组后,除“节点组名称”外的所有选项都是可编辑的。
@@ -135,7 +135,7 @@ Spot 实例现在[受 EKS 支持](https://docs.aws.amazon.com/eks/latest/usergui
| Tags | 管理的节点组的标签,这些标签不会传播到任何相关资源。 |
-### 配置刷新间隔
+## 配置刷新间隔
`eks-refresh-cron` 设置已弃用。它已迁移到 `eks-refresh` 设置,这是一个表示秒的整数。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
index fa562ae9cee..6cb63287e57 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
@@ -4,11 +4,11 @@ title: 私有集群
在 GKE 中,[私有集群](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept)是一种集群,其节点仅通过分配内部 IP 地址与入站和出站流量相隔离。GKE 中的私有集群可以选择将 controlplane 端点作为公开访问的地址或作为私有地址。这与其他 Kubernetes 提供商不同,后者可能将具有私有 controlplane 端点的集群称为“私有集群”,但仍允许进出节点的流量。基于你的组织的网络和安全要求,你可能想创建一个有私有节点的集群,其中有或没有公共 controlplane 端点。从 Rancher 配置的 GKE 集群可以通过在**集群选项**中选择**私有集群**(在**显示高级选项**下)来使用隔离的节点。通过选择**启用私有端点**,可以选择将 controlplane 端点设为私有。
-### 私有节点
+## 私有节点
由于私有集群中的节点只有内部 IP 地址,它们将无法安装 cluster agent,Rancher 将无法完全管理集群。这可以通过几种方式来处理。
-#### Cloud NAT
+### Cloud NAT
:::caution
@@ -18,7 +18,7 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。
如果限制外出的互联网访问对你的组织来说不是一个问题,可以使用 Google 的 [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) 服务来允许私有网络中的节点访问互联网,使它们能够从 Dockerhub 下载所需的镜像并与 Rancher management server 通信。这是最简单的解决方案。
-#### 私有镜像仓库
+### 私有镜像仓库
:::caution
@@ -26,13 +26,13 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。
:::
-如果要求限制节点的传入和传出流量,请按照离线安装说明,在集群所在的 VPC 上设置一个私有容器[镜像仓库](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md),从而允许集群节点访问和下载运行 cluster agent 所需的镜像。如果 controlplane 端点也是私有的,Rancher 将需要[直接访问](#直接访问)它。
+如果要求限制节点的传入和传出流量,请按照离线安装说明,在集群所在的 VPC 上设置一个私有容器[镜像仓库](../../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md),从而允许集群节点访问和下载运行 cluster agent 所需的镜像。如果 controlplane 端点也是私有的,Rancher 将需要[直接访问](#直接访问)它。
-### 私有 controlplane 端点
+## 私有 controlplane 端点
如果集群暴露了公共端点,Rancher 将能够访问集群,且无需执行额外的步骤。但是,如果集群没有公共端点,则必须确保 Rancher 可以访问集群。
-#### Cloud NAT
+### Cloud NAT
:::caution
@@ -42,7 +42,7 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。
如上所述,如果不考虑限制对节点的传出互联网访问,则可以使用 Google 的 [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) 服务来允许节点访问互联网。当集群进行配置时,Rancher 将提供一个在集群上运行的注册命令。下载新集群的 [kubeconfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl) 并在集群上运行提供的 kubectl 命令。如果要通过获取集群访问权来运行此命令,你可以创建临时节点或使用 VPC 中的现有节点,或者登录到某个集群节点或使用某个集群节点创建 SSH 隧道。
-#### 直接访问
+### 直接访问
如果 Rancher server 与集群的 controlplane 运行在同一 VPC 上,它将直接访问 controlplane 的私有端点。集群节点将需要访问[私有镜像仓库](#私有镜像仓库)以下载上述的镜像。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md
index 7cdc1ec7c64..6d4f2525bed 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md
@@ -2,7 +2,7 @@
title: RKE 集群配置参考
---
-Rancher 安装 Kubernetes 时,它使用 [RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 或 [RKE2](https://docs.rke2.io/) 作为 Kubernetes 发行版。
+Rancher 安装 Kubernetes 时,它使用 [RKE](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 或 [RKE2](https://docs.rke2.io/) 作为 Kubernetes 发行版。
本文介绍 Rancher 中可用于新的或现有的 RKE Kubernetes 集群的配置选项。
@@ -16,7 +16,7 @@ Rancher 安装 Kubernetes 时,它使用 [RKE](../../../pages-for-subheaders/la
RKE 集群配置选项嵌套在 `rancher_kubernetes_engine_config` 参数下。有关详细信息,请参阅[集群配置文件](#rke-集群配置文件参考)。
-在 [RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中,你可以编辑任何后续剩余的选项。
+在 [RKE 启动的集群](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中,你可以编辑任何后续剩余的选项。
有关 RKE 配置文件语法的示例,请参阅 [RKE 文档](https://rancher.com/docs/rke/latest/en/example-yamls/)。
@@ -88,7 +88,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
### Kubernetes 云提供商
-你可以配置 [Kubernetes 云提供商](../../../pages-for-subheaders/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
+你可以配置 [Kubernetes 云提供商](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
:::note
@@ -131,7 +131,7 @@ Rancher v2.6 引入了[为 RKE 集群配置 ECR 镜像仓库](https://rancher.co
### 节点池
-有关使用 Rancher UI 在 RKE 集群中设置节点池的信息,请参阅[此页面](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+有关使用 Rancher UI 在 RKE 集群中设置节点池的信息,请参阅[此页面](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)。
### NGINX Ingress
@@ -325,7 +325,7 @@ windows_prefered_cluster: false
### enable_cluster_monitoring
-启用或禁用[集群监控](../../../pages-for-subheaders/monitoring-and-alerting.md)的选项。
+启用或禁用[集群监控](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)的选项。
### enable_network_policy
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index 3f4328512ae..7a872f77ea2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -110,13 +110,13 @@ Rancher 与以下开箱即用的网络提供商兼容:
所有 CNI 网络插件都支持[双栈](https://docs.rke2.io/install/network_options#dual-stack-configuration)网络。要在双栈模式下配置 RKE2,请为你的[集群 CIDR](#集群-cidr) 和/或 [Service CIDR](#service-cidr) 设置有效的 IPv4/IPv6 CIDR。
-###### 额外配置 {#dual-stack-additional-config}
+###### 额外配置
使用 `cilium` 或 `multus,cilium` 作为容器网络接口提供商时,请确保**启用 IPv6 支持**选项。
#### 云提供商
-你可以配置 [Kubernetes 云提供商](../../../pages-for-subheaders/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
+你可以配置 [Kubernetes 云提供商](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
:::note
@@ -130,7 +130,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
#### Worker CIS 配置文件
-选择一个 [CIS benchmark](../../../pages-for-subheaders/cis-scan-guides.md) 来验证系统配置。
+选择一个 [CIS benchmark](../../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md) 来验证系统配置。
#### 项目网络隔离
@@ -140,13 +140,13 @@ Rancher 与以下开箱即用的网络提供商兼容:
#### CoreDNS
-默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking#coredns)。
+默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#coredns)。
#### NGINX Ingress
-如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
-有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
#### Metrics Server
@@ -182,7 +182,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
要配置[双栈](https://docs.rke2.io/install/network_options#dual-stack-configuration)模式,请输入有效的 IPv4/IPv6 CIDR。例如 `10.42.0.0/16,2001:cafe:42:0::/56`。
-使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#dual-stack-additional-config)。
+使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#额外配置)。
#### Service CIDR
@@ -192,7 +192,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
要配置[双栈](https://docs.rke2.io/install/network_options#dual-stack-configuration)模式,请输入有效的 IPv4/IPv6 CIDR。例如 `10.42.0.0/16,2001:cafe:42:0::/56`。
-使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#dual-stack-additional-config)。
+使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#额外配置)。
#### 集群 DNS
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md
index 823aaa2de81..af2782a81b3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md
@@ -8,7 +8,7 @@ title: 同步
如果你同时处理来自另一个来源的更新,你可能会不小心覆盖一个来源的状态。如果你在完成一个来源的更新后 5 分钟内处理另一个来源的更新,也可能会发生这种情况。
:::
-### 工作原理
+## 工作原理
要理解同步是如何工作的,则必须理解 Rancher Cluster 对象上的两个字段:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md
index c301e408d1f..895c75d5c0e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md
@@ -2,7 +2,7 @@
title: Rancher Agent 选项
---
-Rancher 在每个节点上部署一个 Agent 来与节点通信。本文描述了可以传递给 Agent 的选项。要使用这些选项,你需要[使用自定义节点创建集群](../../../../pages-for-subheaders/use-existing-nodes.md),并在添加节点时将选项添加到生成的 `docker run` 命令。
+Rancher 在每个节点上部署一个 Agent 来与节点通信。本文描述了可以传递给 Agent 的选项。要使用这些选项,你需要[使用自定义节点创建集群](./use-existing-nodes.md),并在添加节点时将选项添加到生成的 `docker run` 命令。
有关 Rancher 如何使用 Node Agent 与下游集群通信的概述,请参阅[产品架构](../../../rancher-manager-architecture/communicating-with-downstream-user-clusters.md#3-node-agents)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
index 555b50a4a41..aaa9b78100e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
@@ -69,7 +69,7 @@ description: 要创建具有自定义节点的集群,你需要访问集群中
:::
-8. **可选**:点击**[显示高级选项](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md)**来指定注册节点时使用的 IP 地址,覆盖节点的主机名,或将[标签](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)或[污点](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)添加到节点。
+8. **可选**:点击**[显示高级选项](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md)**来指定注册节点时使用的 IP 地址,覆盖节点的主机名,或将[标签](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)或[污点](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)添加到节点。
9. 将屏幕上显示的命令复制到剪贴板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/kubernetes-concepts.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/kubernetes-concepts.md
index 17a1bc2b969..5666ba5f4fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/kubernetes-concepts.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/kubernetes-concepts.md
@@ -53,7 +53,7 @@ controlplane 节点上运行 Kubernetes API server、scheduler 和 Controller Ma
- **Kubelets**:监控节点状态的 Agent,确保你的容器处于健康状态。
- **工作负载**:承载应用和其他 deployment 的容器和 Pod。
-Worker 节点也运行存储和网络驱动,有必要时也会运行 Ingress Controller。你可以根据需要,创建尽可能多的 worker 节点来运行你的[工作负载](../pages-for-subheaders/workloads-and-pods.md)。
+Worker 节点也运行存储和网络驱动,有必要时也会运行 Ingress Controller。你可以根据需要,创建尽可能多的 worker 节点来运行你的[工作负载](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)。
## 关于 Helm
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/examples.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/examples.md
index 98169f63376..ed044a40a65 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/examples.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/examples.md
@@ -2,15 +2,15 @@
title: 示例
---
-### ServiceMonitor
+## ServiceMonitor
你可以在[此处](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)找到 ServiceMonitor 自定义资源的示例。
-### PodMonitor
+## PodMonitor
你可以在[此处](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml)找到 PodMonitor 示例,还可以在[此处](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml)找到引用它的 Prometheus 资源示例。
-### PrometheusRule
+## PrometheusRule
PrometheusRule 包含你通常放置在 [Prometheus 规则文件](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)中的告警和记录规则。
@@ -18,6 +18,6 @@ PrometheusRule 包含你通常放置在 [Prometheus 规则文件](https://promet
你可以在[此页面](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md)找到 PrometheusRule 示例。
-### Alertmanager 配置
+## Alertmanager 配置
有关示例配置,请参阅[本节](./receivers.md#alertmanager-配置示例)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
index e514d60a928..b7377b610b1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
@@ -14,7 +14,7 @@ ServiceMonitor 比 PodMonitor 更常用,推荐用于大多数用例。
:::
-### ServiceMonitor
+## ServiceMonitor
这个伪 CRD 映射到 Prometheus 自定义资源配置的一部分。它以声明方式指定应如何监控 Kubernetes 服务组。
@@ -24,7 +24,7 @@ ServiceMonitor 比 PodMonitor 更常用,推荐用于大多数用例。
有关 ServiceMonitor 工作原理的更多信息,请参阅 [Prometheus Operator 文档](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)。
-### PodMonitor
+## PodMonitor
这个伪 CRD 映射到 Prometheus 自定义资源配置的一部分。它以声明方式指定应如何监控 Pod 组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/prometheus-federator/rbac.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/prometheus-federator/rbac.md
index a519b5a8cbc..c7aea4be4be 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/prometheus-federator/rbac.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/prometheus-federator/rbac.md
@@ -4,7 +4,7 @@ title: RBAC
本文介绍 Prometheus Federator RBAC。
-如[命名空间](../../pages-for-subheaders/prometheus-federator.md#命名空间)部分所述,Prometheus Federator 期望集群中具有项目级别权限(例如,具有由单个标签选择器确定的命名空间组的权限)的项目所有者、项目成员和其他用户,除了项目 Registration 命名空间(默认导入到项目中)和那些已经包含其项目的命名空间之外,在任何其他命名空间中都只有最低权限。因此,为了让项目所有者将特定 Chart 权限分配给其项目命名空间中的其他用户,Helm Project Operator 将自动监视以下绑定:
+如[命名空间](./prometheus-federator.md#命名空间)部分所述,Prometheus Federator 期望集群中具有项目级别权限(例如,具有由单个标签选择器确定的命名空间组的权限)的项目所有者、项目成员和其他用户,除了项目 Registration 命名空间(默认导入到项目中)和那些已经包含其项目的命名空间之外,在任何其他命名空间中都只有最低权限。因此,为了让项目所有者将特定 Chart 权限分配给其项目命名空间中的其他用户,Helm Project Operator 将自动监视以下绑定:
- ClusterRoleBindings
- 项目发布命名空间中的 RoleBindings
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-cluster-tools.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-cluster-tools.md
index 7f1ed5206c7..e454be93e76 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-cluster-tools.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-cluster-tools.md
@@ -17,7 +17,7 @@ Logging 支持:
Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
-有关详细信息,请参阅 [Logging 文档](../pages-for-subheaders/logging.md)。
+有关详细信息,请参阅 [Logging 文档](../integrations-in-rancher/logging/logging.md)。
## 监控和告警
你可以使用 Rancher,通过业界领先并开源的 [Prometheus](https://prometheus.io/) 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。
@@ -28,7 +28,7 @@ Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
告警是触发这些通知的规则。在接收告警之前,你必须在 Rancher 中配置一个或多个通知器。你可以在集群或项目级别设置告警范围。
-如需更多信息,请参阅[监控文档](../pages-for-subheaders/monitoring-and-alerting.md)。
+如需更多信息,请参阅[监控文档](../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)。
## Istio
@@ -36,7 +36,7 @@ Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
Rancher v2.5 改进了与 Istio 的集成。
-如需更多信息,请参阅 [Istio 文档](../pages-for-subheaders/istio.md)。
+如需更多信息,请参阅 [Istio 文档](../integrations-in-rancher/istio/istio.md)。
## OPA Gatekeeper
[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) 是一个开源项目,它对 OPA 和 Kubernetes 进行了集成,以通过许可控制器 Webhook 提供策略控制。有关如何在 Rancher 中启用 Gatekeeper 的详细信息,请参阅 [OPA Gatekeeper](../integrations-in-rancher/opa-gatekeeper.md)。
@@ -45,4 +45,4 @@ Rancher v2.5 改进了与 Istio 的集成。
Rancher 可以通过运行安全扫描来检查 Kubernetes 是否按照 CIS Kubernetes Benchmark 中定义的安全最佳实践进行部署。
-如需更多信息,请参阅 [CIS 扫描文档](../pages-for-subheaders/cis-scan-guides.md)。
\ No newline at end of file
+如需更多信息,请参阅 [CIS 扫描文档](../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/architecture-recommendations.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/architecture-recommendations.md
index 10ae10563b0..7e61817fbe6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/architecture-recommendations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/architecture-recommendations.md
@@ -53,7 +53,7 @@ title: 架构推荐
我们强烈建议你把 Rancher 安装到托管在云提供商(如 AWS EC2 和 Google Compute Engine(GCE)等)上的 Kubernetes 集群上。
-为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
+为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
## Kubernetes 安装的推荐节点角色
@@ -95,7 +95,7 @@ RKE 每个角色至少需要一个节点,但并不强制每个节点只能有
由于 Rancher Server 集群中没有部署其他工作负载,因此在大多数情况下,这个集群都不需要使用我们出于可扩展性和可用性的考虑,而为下游集群推荐的架构。
-有关下游集群的最佳实践,请查看[生产环境清单](../../pages-for-subheaders/checklist-for-production-ready-clusters.md)或[最佳实践](../../pages-for-subheaders/best-practices.md)。
+有关下游集群的最佳实践,请查看[生产环境清单](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md)或[最佳实践](../best-practices/best-practices.md)。
## 授权集群端点架构
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
index 56800763102..9d7570ba151 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
@@ -17,6 +17,8 @@ title: 与下游集群通信
3. [Node Agents](#3-node-agents)
4. [授权集群端点](#4-授权集群端点)
+## Components
+
### 1. 认证代理
在此图中,名为 Bob 的用户希望查看在名为 User Cluster 1 的下游集群上运行的所有 Pod。在 Rancher 中,他可以运行 `kubectl` 命令来查看
@@ -58,7 +60,7 @@ Cluster Agent,也叫做 `cattle-cluster-agent`,是运行在下游集群中
授权集群端点(ACE)可连接到下游集群的 Kubernetes API Server,而不用通过 Rancher 认证代理调度请求。
-> 授权集群端点仅适用于 Rancher 启动的 Kubernetes 集群,即只适用于 Rancher [使用 RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 来配置的集群。它不适用于导入的集群,也不适用于托管在 Kubernetes 提供商中的集群(例如 Amazon 的 EKS)。
+> 授权集群端点仅适用于 Rancher 启动的 Kubernetes 集群,即只适用于 Rancher [使用 RKE](../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 来配置的集群。它不适用于导入的集群,也不适用于托管在 Kubernetes 提供商中的集群(例如 Amazon 的 EKS)。
授权集群端点的主要用途:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/rancher-server-and-components.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/rancher-server-and-components.md
index b715f4be1e4..ef25d5ea4c1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/rancher-server-and-components.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-manager-architecture/rancher-server-and-components.md
@@ -6,9 +6,9 @@ title: Rancher Server 和 Components
下图展示了 Rancher 2.x 的上层架构。下图中,Rancher Server 管理两个下游 Kubernetes 集群,其中一个由 RKE 创建,另一个由 Amazon EKS 创建。
-为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
+为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
-下图介绍了用户如何通过 Rancher 的认证代理管理 [Rancher 启动的 Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群和[托管的 Kubernetes](../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) 集群:
+下图介绍了用户如何通过 Rancher 的认证代理管理 [Rancher 启动的 Kubernetes](../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 集群和[托管的 Kubernetes](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md) 集群:
通过 Rancher 的认证代理管理 Kubernetes 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-project-tools.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-project-tools.md
index ad05d4166e0..fd99a83952e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-project-tools.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-project-tools.md
@@ -25,8 +25,8 @@ Logging 支持:
Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
-有关详细信息,请参阅 [Logging](../pages-for-subheaders/logging.md)。
+有关详细信息,请参阅 [Logging](../integrations-in-rancher/logging/logging.md)。
## Monitoring
-你可以使用 Rancher,通过业界领先并开源的 [Prometheus](https://prometheus.io/) 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。有关详细信息,请参阅 [Monitoring](../pages-for-subheaders/monitoring-and-alerting.md)。
+你可以使用 Rancher,通过业界领先并开源的 [Prometheus](https://prometheus.io/) 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。有关详细信息,请参阅 [Monitoring](../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
index d40edd47e96..d3c891e156c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
@@ -102,7 +102,7 @@ Rancher 提供了 `rancher-restricted` 模板,用于强制执行高度限制
-K3s v1.24 及更早版本支持 [Pod 安全策略 (PSP)](https://v1-24.docs.kubernetes.io/docs/concepts/security/pod-security-policy/) 以控制 Pod 安全性。
+K3s v1.24 及更早版本支持 [Pod 安全策略 (PSP)](https://github.com/kubernetes/website/blob/release-1.24/content/en/docs/concepts/security/pod-security-policy.md) 以控制 Pod 安全性。
你可以在 Rancher 中通过集群配置,传递以下标志来启用 PSPs:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/kubernetes-security-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/kubernetes-security-best-practices.md
index af9746e899f..5b53a6b0467 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/kubernetes-security-best-practices.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/kubernetes-security-best-practices.md
@@ -2,7 +2,7 @@
title: Kubernetes 安全最佳实践
---
-### 限制云元数据 API 访问
+## 限制云元数据 API 访问
AWS、Azure、DigitalOcean 或 GCP 等云提供商通常会在本地向实例公开元数据服务。默认情况下,此端点可被运行在云实例上的 pod 访问,包括在托管的 Kubernetes(如 EKS、AKS、DigitalOcean Kubernetes 或 GKE)中的 pod,并且可以包含该节点的云凭证、配置数据(如 kubelet 凭证)以及其他敏感数据。为了降低在云平台上运行的这种风险,请遵循 [Kubernetes 安全建议](https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster/#restricting-cloud-metadata-api-access),即限制授予实例凭证的权限,使用网络策略限制 pod 对元数据 API 的访问,并避免使用配置数据来传递密文。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security-best-practices.md
index 0d98495ffec..9b13fb3e842 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security-best-practices.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security-best-practices.md
@@ -6,7 +6,7 @@ title: Rancher 安全最佳实践
-### 限制对 /version 和 /rancherversion 的公共访问
+## 限制对 /version 和 /rancherversion 的公共访问
上游(本地) Rancher 实例提供正在运行的 Rancher 版本和用于构建它的 Go 版本信息。这些信息可以通过 `/version` 路径访问,该路径用于诸如自动化版本升级或确认部署成功等任务。上游实例还提供了可通过 `/rancherversion` 路径访问的 Rancher 版本信息。
@@ -14,7 +14,7 @@ title: Rancher 安全最佳实践
更多关于保护服务器的详细信息,请参阅 [OWASP Web Application Security Testing - Enumerate Infrastructure and Application Admin Interfaces](https://owasp.org/www-project-web-security-testing-guide/stable/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/05-Enumerate_Infrastructure_and_Application_Admin_Interfaces.html)。
-### 会话管理
+## 会话管理
某些环境可能需要额外的安全控制来管理会话。例如,你可能希望限制用户的并发活动会话或限制可以从哪些地理位置发起这些会话。Rancher 默认情况下不支持这些功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security.md b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security.md
index 935aaa2b780..85c1e15e37c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/reference-guides/rancher-security/rancher-security.md
@@ -27,11 +27,11 @@ title: 安全
本文介绍了安全相关的文档以及资源,让你的 Rancher 安装和下游 Kubernetes 集群更加安全。
-### NeuVector 与 Rancher 的集成
+## NeuVector 与 Rancher 的集成
NeuVector 是一个开源的、以容器为中心的安全应用程序,现已集成到 Rancher 中。NeuVector 提供生产安全、DevOps 漏洞保护和容器防火墙等功能。请参阅 [Rancher 文档](../../integrations-in-rancher/neuvector/neuvector.md) 和 [NeuVector 文档](https://open-docs.neuvector.com/)了解更多信息。
-### 在 Kubernetes 集群上运行 CIS 安全扫描
+## 在 Kubernetes 集群上运行 CIS 安全扫描
Rancher 使用 [kube-bench](https://github.com/aquasecurity/kube-bench) 来运行安全扫描,从而检查 Kubernetes 是否按照 [CIS](https://www.cisecurity.org/cis-benchmarks/)(Center for Internet Security,互联网安全中心)Kubernetes Benchmark 中定义的安全最佳实践进行部署。
@@ -47,13 +47,13 @@ Rancher 在集群上运行 CIS 安全扫描时会生成一份报告,该报告
有关详细信息,请参阅[安全扫描](../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)。
-### SELinux RPM
+## SELinux RPM
[安全增强型 Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) 是对 Linux 的安全增强。被政府机构使用之后,SELinux 已成为行业标准,并在 CentOS 7 和 8 上默认启用。
我们提供了 `rancher-selinux` 和 `rke2-selinux` 两个 RPM(Red Hat 软件包),让 Rancher 产品能够在 SELinux 主机上正常运行。有关详细信息,请参阅[此页面](selinux-rpm/selinux-rpm.md)。
-### Rancher 加固指南
+## Rancher 加固指南
Rancher 加固指南基于 CIS Kubernetes Benchmark 。
@@ -63,7 +63,7 @@ Rancher 加固指南基于
-
-import {Redirect} from '@docusaurus/router';
-
-const Home = () => {
-return ;
-};
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/backups/docker-installs/docker-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/backups/docker-installs/docker-installs.md
deleted file mode 100644
index 51c3001d777..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/backups/docker-installs/docker-installs.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: 备份和恢复 Docker 安装的 Rancher
----
-
-- [备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)
-- [还原](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md
deleted file mode 100644
index 39c332461ce..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: RKE 集群配置
----
-
-本文已迁移到[此处](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/container-network-interface-providers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/container-network-interface-providers.md
index ad796bb5c4c..0d9c93e18fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/container-network-interface-providers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/container-network-interface-providers.md
@@ -90,7 +90,7 @@ Kubernetes worker 需要打开 TCP 端口 `6783`(控制端口)、UDP 端口
有关详细信息,请参阅以下页面:
-- [Weave Net 官网](https://www.weave.works/)
+- [Weave Net 官网](https://github.com/weaveworks/weave/blob/master/site/overview.md)
### RKE2 Kubernetes 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/dockershim.md
index cfab0dfbaf4..d1dc4c19136 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/dockershim.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/faq/dockershim.md
@@ -18,15 +18,15 @@ enable_cri_dockerd: true
-Q. 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
+Q: 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
对于 RKE,Dockershim 的上游支持从 Kubernetes 1.21 开始。你需要使用 Rancher 2.6 或更高版本才能获取使用 Kubernetes 1.21 的 RKE 的支持。详情请参阅我们的[支持矩阵](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/)。
-Q. 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
+Q: 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
-A. 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
+A: 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
index c34d930bf00..58794eba2e4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
@@ -76,11 +76,11 @@ title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
1. 输入以下命令:
- ```
- docker run -d --restart=unless-stopped \
- -p 80:80 -p 443:443 \
- rancher/rancher:latest --no-cacerts
- ```
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ rancher/rancher:latest --no-cacerts
+ ```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
index 6cadea62ced..8cb54a0bf7a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
@@ -4,7 +4,7 @@ title: 为大型安装进行 etcd 调优
当你运行具有 15 个或更多集群的大型 Rancher 安装时,我们建议你扩大 etcd 的默认 keyspace(默认为 2GB)。你最大可以将它设置为 8GB。此外,请确保主机有足够的 RAM 来保存整个数据集。如果需要增加这个值,你还需要同步增加主机的大小。如果你预计在垃圾回收间隔期间 Pod 的变化率很高,你也可以在较小的安装中调整 Keyspace 大小。
-Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
+Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.4/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
### 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
@@ -19,7 +19,7 @@ services:
## 扩展 etcd 磁盘性能
-你可以参见 [etcd 文档](https://etcd.io/docs/v3.4.0/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
+你可以参见 [etcd 文档](https://etcd.io/docs/v3.4/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
此外,为了减少 etcd 磁盘上的 IO 争用,你可以为 data 和 wal 目录使用专用设备。etcd 最佳实践不建议配置 Mirror RAID(因为 etcd 在集群中的节点之间复制数据)。你可以使用 striping RAID 配置来增加可用的 IOPS。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
index 3efac3e477e..0bf174f1ab7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
@@ -56,4 +56,4 @@ RKE 模板可以应用于新集群。
- 创建了一个新的 RKE 模板。
- 将集群转换为使用该新模板。
-- 可以[使用新模板创建新集群](apply-templates.md#使用-rke-模板创建集群)。
\ No newline at end of file
+- 可以[使用新模板创建新集群](#使用-rke-模板创建集群)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
index c5e91cf7108..a5575735c20 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
@@ -48,5 +48,5 @@ title: 生产就绪集群检查清单
### 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 [Kubernetes Cloud Provider](../set-up-cloud-providers/set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
index a0ec43f76c5..7279046a845 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
@@ -53,7 +53,7 @@ title: 推荐的集群架构
参考:
-* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance)
+* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.4/faq/#what-is-failure-tolerance)
* [为 Kubernetes 操作 etcd 集群的官方 Kubernetes 文档](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/)
### Worker 节点数
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
index 18e4a4da40b..61267913f01 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
@@ -104,7 +104,7 @@ Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集
有关大型 Kubernetes 集群的硬件建议,请参阅[构建大型集群](https://kubernetes.io/docs/setup/best-practices/cluster-large/)的官方 Kubernetes 文档。
-有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.4.0/op-guide/hardware/)。
+有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.4/op-guide/hardware/)。
## 网络要求
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
index 217f3ed9577..fe0b29562ee 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
@@ -12,7 +12,7 @@ _kubeconfig 文件_ 是与 kubectl 命令行工具(或其他客户端)结合
下载 kubeconfig 文件后,你将能够使用 kubeconfig 文件及其 Kubernetes [上下文](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration)访问下游集群。
-如果管理员[关闭了 kubeconfig 令牌生成](../../../../reference-guides/about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](./authorized-cluster-endpoint.md) 存在于你的 PATH 中。
+如果管理员[关闭了 kubeconfig 令牌生成](../../../../reference-guides/about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](../../../../reference-guides/cli-with-rancher/rancher-cli.md) 存在于你的 PATH 中。
### RKE 集群的两种身份验证方法
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
index 6582e5e0f50..8ea6ecae323 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
@@ -66,7 +66,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **StatefulSet**。
1. 在**卷声明模板**选项卡上,单击**添加声明模板**。
1. 输入持久卷的名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 点击**启动**。
@@ -80,7 +80,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **⋮ > 编辑配置**,转到使用由 StorageClass 配置的存储的工作负载。
1. 在**卷声明模板**中,单击**添加声明模板**。
1. 输入持久卷名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 单击**保存**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
index a262560e90f..7b2627dc6a1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
@@ -300,7 +300,7 @@ title: 通过 AWS EC2 Auto Scaling 组使用 Cluster Autoscaler
| max-node-provision-time | "15m" | CA 等待节点配置的最长时间 |
| nodes | - | 以云提供商接受的格式设置节点组的最小、最大大小和其他配置数据。可以多次使用。格式是 `::`。 |
| node-group-auto-discovery | - | 节点组自动发现的一个或多个定义。定义表示为 `:[[=]]` |
-| estimator | - | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
+| estimator |"binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
| expander | "random" | 要在扩容中使用的节点组扩展器的类型。可用值:`["random","most-pods","least-waste","price","priority"]` |
| ignore-daemonsets-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 DaemonSet pod |
| ignore-mirror-pods-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 Mirror pod |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
index 65a6e6254f1..215966fd905 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
@@ -169,12 +169,12 @@ Rancher 在 Kubernetes 之上进行了扩展,除了集群级别之外,还允
### 4. 可选:添加资源配额
-资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
要添加资源配额:
1. 在**资源配额**选项卡中,单击**添加资源**。
-1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
1. 输入**项目限制**和**命名空间默认限制**的值。
1. **可选**:指定**容器默认资源限制**,这将应用于项目中启动的所有容器。如果资源配额设置了 CPU 或内存限制,则建议使用该参数。可以在单个命名空间或容器级别上覆盖它。有关详细信息,请参阅[容器默认资源限制](../../../pages-for-subheaders/manage-project-resource-quotas.md)。
1. 单击**创建**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md
index 1c8aeba0767..5757fadb42d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/how-to-guides/new-user-guides/manage-namespaces.md
@@ -1,4 +1,4 @@
----
+---
title: 命名空间
---
@@ -21,7 +21,7 @@ title: 命名空间
:::note
-如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](manage-namespaces.md),以确保你有权访问该命名空间。
+如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](#创建命名空间),以确保你有权访问该命名空间。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/integrations-in-rancher/istio/cpu-and-memory-allocations.md
index ce4a3fd3f89..81337a9344a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/integrations-in-rancher/istio/cpu-and-memory-allocations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/integrations-in-rancher/istio/cpu-and-memory-allocations.md
@@ -44,7 +44,7 @@ Kubernetes 中的资源请求指的是,除非该节点至少具有指定数量
1. 在左侧导航栏中,点击 **Apps**。
1. 点击**已安装的应用**。
1. 转到 `istio-system` 命名空间。在某个 Istio 工作负载中(例如 `rancher-istio`),点击**⋮ > 编辑/升级**。
-1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](cpu-and-memory-allocations.md#编辑覆盖文件)。
+1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](#编辑覆盖文件)。
1. 更改 CPU 或内存分配、调度各个组件的节点,或节点容忍度。
1. 点击**升级**。然后,更改就能启用。
@@ -56,7 +56,7 @@ Kubernetes 中的资源请求指的是,除非该节点至少具有指定数量
1. 在左侧导航栏中,点击**应用 & 应用市场**。
1. 点击**已安装的应用**。
1. 转到 `istio-system` 命名空间。在某个 Istio 工作负载中(例如 `rancher-istio`),点击**⋮ > 编辑/升级**。
-1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](cpu-and-memory-allocations.md#编辑覆盖文件)。
+1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](#编辑覆盖文件)。
1. 更改 CPU 或内存分配、调度各个组件的节点,或节点容忍度。
1. 点击**升级**。然后,更改就能启用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md
index 5c925f124ed..0b208d527d4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/pages-for-subheaders/checklist-for-production-ready-clusters.md
@@ -44,5 +44,5 @@ title: 生产就绪集群检查清单
### 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 [Kubernetes Cloud Provider](set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index 6861e9e361d..443e219380e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -140,13 +140,13 @@ Rancher 与以下开箱即用的网络提供商兼容:
#### CoreDNS
-默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking#coredns)。
+默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#coredns)。
#### NGINX Ingress
-如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
-有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
#### Metrics Server
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-security/security-advisories-and-cves.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-security/security-advisories-and-cves.md
index dfee73d487d..b6bdedb5d4b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-security/security-advisories-and-cves.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/reference-guides/rancher-security/security-advisories-and-cves.md
@@ -27,7 +27,7 @@ Rancher 致力于向社区披露我们产品的安全问题。我们会针对已
| [CVE-2022-31247](https://github.com/rancher/rancher/security/advisories/GHSA-6x34-89p7-95wg) | 在 Rancher 2.5.15 和 2.6.6 及之前的版本中发现了一个问题。授权逻辑缺陷允许在下游集群中通过集群角色模板绑定 (CRTB) 和项目角色模板绑定 (PRTB) 来提升权限。任何有权限创建/编辑 CRTB 或 PRTB 的用户(例如 `cluster-owner`、`manage cluster members`、`project-owner` 和 `manage project members`)都可以利用该漏洞,在同一集群的另一个项目或不同下游集群的另一个项目中获得所有者权限。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
| [CVE-2021-36783](https://github.com/rancher/rancher/security/advisories/GHSA-8w87-58w6-hfv8) | 2.5.12 到 2.6.3 的 Rancher 版本无法正确清理集群模板 answer 中的凭证。此错误可能会导致明文存储以及凭证、密码和 API 令牌被暴露。在 Rancher 中,已认证的 `Cluster Owner`、`Cluster Member`、`Project Owner` 和 `Project Member` 可以在 `/v1/management.cattle.io.clusters`、`/v3/clusters` 和 `/k8s/clusters/local/apis/management.cattle.io/v3/clusters` 端点上看到暴露的凭证。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
| [CVE-2021-36782](https://github.com/rancher/rancher/security/advisories/GHSA-g7j7-h4q8-8w2f) | 在 2.5.15 到 2.6.6 的 Rancher 版本中发现了一个问题,其中密码、API 密钥和 Rancher 的 ServiceAccount 令牌(用于配置集群)等敏感字段直接以明文形式存储在 `Cluster` 等 Kubernetes 对象上(例如,`cluster.management.cattle.io`)。任何能够读取 Kubernetes API 中的对象的用户都可以检索这些敏感数据的明文版本。该问题由 Florian Struck(来自 [Continum AG](https://www.continum.net/))和 [Marco Stuurman](https://github.com/fe-ax)(来自 [Shock Media B.V.](https://www.shockmedia.nl/))发现并报告。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
-| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | 此漏洞仅影响通过 [RKE 模板](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/)配置 [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) 容器网络接口 (CNI) 的客户。在 Rancher 2.5.0 到 2.5.13 和 Rancher 2.6.0 到 2.6.4 版本中发现了一个漏洞。如果将 CNI 选为 Weave,RKE 模板的用户界面 (UI) 不包括 Weave 密码的值。如果基于上述模板创建集群,并且将 Weave 配置为 CNI,则 Weave 中不会为[网络加密](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/)创建密码。因此,集群中的网络流量将不加密发送。 | 2022 年 5 月 24 日 | [Rancher 2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) 和 [Rancher 2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) |
+| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | 此漏洞仅影响通过 [RKE 模板](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/)配置 [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) 容器网络接口 (CNI) 的客户。在 Rancher 2.5.0 到 2.5.13 和 Rancher 2.6.0 到 2.6.4 版本中发现了一个漏洞。如果将 CNI 选为 Weave,RKE 模板的用户界面 (UI) 不包括 Weave 密码的值。如果基于上述模板创建集群,并且将 Weave 配置为 CNI,则 Weave 中不会为[网络加密](https://github.com/weaveworks/weave/blob/master/site/tasks/manage/security-untrusted-networks.md)创建密码。因此,集群中的网络流量将不加密发送。 | 2022 年 5 月 24 日 | [Rancher 2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) 和 [Rancher 2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) |
| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | 在 Rancher 2.5.0 到 2.5.12 和 Rancher 2.6.0 到 2.6.3 中发现了一个漏洞,该漏洞允许能创建或更新[全局角色](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/)的用户将他们或其他用户升级为管理员。全局角色能授予用户 Rancher 级别的权限,例如能创建集群。在已识别的 Rancher 版本中,如果用户被授予了编辑或创建全局角色的权限,他们不仅仅能授予他们已经拥有的权限。此漏洞影响使用能够创建或编辑全局角色的非管理员用户的客户。此场景最常见的用例是 `restricted-admin` 角色。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
| [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | 此漏洞仅影响在 Rancher 中使用 `restricted-admin` 角色的客户。在 Rancher 2.5.0 到 2.5.12 和 2.6.0 到 2.6.3 中发现了一个漏洞,其中 `cattle-global-data` 命名空间中的 `global-data` 角色授予了应用商店的写权限。由于具有任何级别的应用商店访问权限的用户都会绑定到 `global-data` 角色,因此这些用户都能写入模板 `CatalogTemplates`) 和模板版本 (`CatalogTemplateVersions`)。在 Rancher 中创建的新用户默认分配到 `user` 角色(普通用户),该角色本不该具有写入应用商店的权限。此漏洞提升了能写入应用商店模板和应用商店模板版本资源的用户的权限。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | 此漏洞仅影响使用经过认证的 Git 和/或 Helm 仓库通过 [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) 进行持续交付的客户。在 [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) 之前版本中的 `go-getter` 库中发现了一个问题,错误消息中没有删除 Base64 编码的 SSH 私钥,导致该信息暴露。Rancher 中 [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9) 之前的 Fleet 版本使用了该库的漏洞版本。此问题影响 Rancher 2.5.0 到 2.5.12(包括 2.5.12)以及 2.6.0 到 2.6.3(包括 2.6.3)。该问题由 Raft Engineering 的 Dagan Henderson 发现并报告。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/security/security-scan/security-scan.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/security/security-scan/security-scan.md
deleted file mode 100644
index 32cf9e17a4c..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/security/security-scan/security-scan.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: 安全扫描
----
-
-CIS 安全扫描的文档已移至[此处](../../pages-for-subheaders/cis-scan-guides.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md
index 47c806b0278..32bdd10c27c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.6/troubleshooting/other-troubleshooting-tips/networking.md
@@ -102,20 +102,3 @@ title: 网络
* `read tcp: i/o timeout`
有关在 Rancher 和集群节点之间使用 Google Cloud VPN 时如何正确配置 MTU 的示例,请参阅 [Google Cloud VPN:MTU 注意事项](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu)。
-
-### 已解决的问题
-
-#### 由于缺少节点注释,使用 Canal/Flannel 时覆盖网络中断
-
-| | |
-|------------|------------|
-| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) |
-| 解决于 | v2.1.2 |
-
-要检查你的集群是否受到影响,运行以下命令来列出损坏的节点(此命令要求安装 `jq`):
-
-```
-kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name'
-```
-
-如果没有输出,则集群没有影响。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/backups/docker-installs/docker-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/backups/docker-installs/docker-installs.md
deleted file mode 100644
index 51c3001d777..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/backups/docker-installs/docker-installs.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: 备份和恢复 Docker 安装的 Rancher
----
-
-- [备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)
-- [还原](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md
deleted file mode 100644
index 39c332461ce..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/cluster-provisioning/rke-clusters/options/options.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: RKE 集群配置
----
-
-本文已迁移到[此处](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/container-network-interface-providers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/container-network-interface-providers.md
index ad796bb5c4c..0d9c93e18fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/container-network-interface-providers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/container-network-interface-providers.md
@@ -90,7 +90,7 @@ Kubernetes worker 需要打开 TCP 端口 `6783`(控制端口)、UDP 端口
有关详细信息,请参阅以下页面:
-- [Weave Net 官网](https://www.weave.works/)
+- [Weave Net 官网](https://github.com/weaveworks/weave/blob/master/site/overview.md)
### RKE2 Kubernetes 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/dockershim.md
index cfab0dfbaf4..d1dc4c19136 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/dockershim.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/faq/dockershim.md
@@ -18,15 +18,15 @@ enable_cri_dockerd: true
-Q. 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
+Q: 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
对于 RKE,Dockershim 的上游支持从 Kubernetes 1.21 开始。你需要使用 Rancher 2.6 或更高版本才能获取使用 Kubernetes 1.21 的 RKE 的支持。详情请参阅我们的[支持矩阵](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/)。
-Q. 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
+Q: 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
-A. 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
+A: 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/installation-references/feature-flags.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/installation-references/feature-flags.md
index b6d10b82311..35de5fee14e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/installation-references/feature-flags.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/installation-references/feature-flags.md
@@ -19,7 +19,7 @@ title: 功能开关
以下是 Rancher 中可用的功能开关列表。如果你是从旧 Rancher 版本升级的,你可能会在 Rancher UI 中看到其他功能,例如 `proxy` 或 `dashboard`(均[已中断](/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md)):
- `continuous-delivery`:允许从 Fleet 中单独禁用 Fleet GitOps。有关详细信息,请参阅[持续交付](../../../how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md)。
-- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md)。
+- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md)。
- `harvester`:管理 Virtualization Management 页面的访问。用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。有关详细信息,请参阅 [Harvester 集成](../../../integrations-in-rancher/harvester/overview.md)。
- `istio-virtual-service-ui`:启用[可视界面](../../../how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md)来创建、读取、更新和删除 Istio 虚拟服务和目标规则,这些都是 Istio 流量管理功能。
- `legacy`:启用 2.5.x 及更早版本的一组功能,这些功能正逐渐被新的实现淘汰。它们是已弃用以及后续可用于新版本的功能组合。新的 Rancher 安装会默认禁用此标志。如果你从以前版本的 Rancher 升级,此标志会启用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
index 8f920b8b32e..5f2e3e45795 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
@@ -190,7 +190,7 @@ kubectl edit -n cattle-system deployment/cattle-cluster-agent
### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
-在 Rancher UI 的[持续交付](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
#### 为什么要执行这一步骤?
@@ -275,7 +275,7 @@ helm ls -n cattle-system
### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
-在 Rancher UI 的[持续交付](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
#### 为什么要执行这一步骤?
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/overview.md
index 1f3b3afa943..7da1159f1e5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/getting-started/overview.md
@@ -42,7 +42,7 @@ Rancher API Server 是基于嵌入式 Kubernetes API Server 和 etcd 数据库
- **配置 Kubernetes 集群**:Rancher API Server 可以在已有节点上[配置 Kubernetes](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md),或进行 [Kubernetes 版本升级](installation-and-upgrade/upgrade-and-roll-back-kubernetes.md)。
- **管理应用商店**:Rancher 支持使用 [Helm Chart 应用商店](../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md)实现轻松重复部署应用。
- **管理项目**:项目由集群中多个命名空间和访问控制策略组成,是 Rancher 中的一个概念,Kubernetes 中并没有这个概念。你可以使用项目实现以组为单位,管理多个命名空间,并进行 Kubernetes 相关操作。Rancher UI 提供用于[项目管理](../how-to-guides/advanced-user-guides/manage-projects/manage-projects.md)和[项目内应用管理](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md)的功能。
-- **Fleet 持续交付**:在 Rancher 中,你可以使用 [Fleet 持续交付](../integrations-in-rancher/fleet/fleet.md)将应用程序从 Git 仓库部署到目标下游 Kubernetes 集群,无需任何手动操作。
+- **Fleet 持续交付**:在 Rancher 中,你可以使用 [Fleet 持续交付](../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md)将应用程序从 Git 仓库部署到目标下游 Kubernetes 集群,无需任何手动操作。
- **Istio**:[Rancher 与 Istio 集成](../integrations-in-rancher/istio/istio.md),使得管理员或集群所有者可以将 Istio 交给开发者,然后开发者使用 Istio 执行安全策略,排查问题,或为蓝绿部署,金丝雀部署,和 A/B 测试进行流量管理。
### 配置云基础设施
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
index b10f0b61f1b..bb7cf785839 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
@@ -76,11 +76,11 @@ title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
1. 输入以下命令:
- ```
- docker run -d --restart=unless-stopped \
- -p 80:80 -p 443:443 \
- rancher/rancher:latest --no-cacerts
- ```
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ rancher/rancher:latest --no-cacerts
+ ```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
index c1f42f70a4b..39e1c46623e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
@@ -2,7 +2,7 @@
title: 持续交付
---
-Rancher 中预装的 [Fleet](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
+Rancher 中预装的 [Fleet](../../../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
index 6cadea62ced..70721fb7276 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
@@ -4,7 +4,7 @@ title: 为大型安装进行 etcd 调优
当你运行具有 15 个或更多集群的大型 Rancher 安装时,我们建议你扩大 etcd 的默认 keyspace(默认为 2GB)。你最大可以将它设置为 8GB。此外,请确保主机有足够的 RAM 来保存整个数据集。如果需要增加这个值,你还需要同步增加主机的大小。如果你预计在垃圾回收间隔期间 Pod 的变化率很高,你也可以在较小的安装中调整 Keyspace 大小。
-Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
+Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
### 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
@@ -19,7 +19,7 @@ services:
## 扩展 etcd 磁盘性能
-你可以参见 [etcd 文档](https://etcd.io/docs/v3.4.0/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
+你可以参见 [etcd 文档](https://etcd.io/docs/v3.5/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
此外,为了减少 etcd 磁盘上的 IO 争用,你可以为 data 和 wal 目录使用专用设备。etcd 最佳实践不建议配置 Mirror RAID(因为 etcd 在集群中的节点之间复制数据)。你可以使用 striping RAID 配置来增加可用的 IOPS。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
index 3efac3e477e..0bf174f1ab7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
@@ -56,4 +56,4 @@ RKE 模板可以应用于新集群。
- 创建了一个新的 RKE 模板。
- 将集群转换为使用该新模板。
-- 可以[使用新模板创建新集群](apply-templates.md#使用-rke-模板创建集群)。
\ No newline at end of file
+- 可以[使用新模板创建新集群](#使用-rke-模板创建集群)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
index a3cb27d9b6b..1157400dd6a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
@@ -21,7 +21,6 @@ Terraform 是一个服务器配置工具。它使用基础架构即代码,支
Terraform 支持:
- 定义几乎任何类型的基础架构即代码,包括服务器、数据库、负载均衡器、监控、防火墙设置和 SSL 证书
-- 使用应用商店应用和多集群应用
- 跨多个平台(包括 Rancher 和主要云提供商)对基础设施进行编码
- 将基础架构即代码提交到版本控制
- 轻松重复使用基础设施的配置和设置
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
index b9997d6e7b4..9d2a9590372 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
@@ -42,7 +42,7 @@ Rancher 认证代理可以与以下外部认证服务集成。
## 用户和组
-Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目、多集群应用以及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
+Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
:::note
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
index b44cc440312..0fe0a77cebb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
@@ -4,7 +4,7 @@ title: 用户和组
Rancher 依赖用户和组来决定允许登录到 Rancher 的用户,以及他们可以访问哪些资源。你配置外部身份验证提供程序后,该提供程序的用户将能够登录到你的 Rancher Server。用户登录时,验证提供程序将向你的 Rancher Server 提供该用户所属的组列表。
-你可以通过向资源添加用户或组,来控制其对集群、项目、多集群应用、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
+你可以通过向资源添加用户或组,来控制其对集群、项目、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
## 管理成员
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md
index d99c8f91f92..004c19a58f7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md
@@ -48,7 +48,7 @@ ResourceSet 定义了需要备份哪些 Kubernetes 资源。由于备份 Rancher
:::note
-使用 backup-restore-operator 执行恢复后,Fleet 中会出现一个已知问题:用于 clientSecretName 和 helmSecretName 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../../../integrations-in-rancher/fleet/overview.md#故障排除)获得解决方法。
+使用 backup-restore-operator 执行恢复后,Fleet 中会出现一个已知问题:用于 clientSecretName 和 helmSecretName 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../../../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md#故障排除)获得解决方法。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md
deleted file mode 100644
index 4d4d465d136..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: 跨集群部署应用
----
-
-
-
-
-
-不同版本的 Rancher 提供了几种不同的方式来部署跨集群应用。
-
-## Fleet
-
-Rancher v2.5 及更高版本使用 Fleet 跨集群部署应用
-
-使用 Fleet 的持续交付是大规模的 GitOps。如需更多信息,请参阅 [Fleet](fleet.md)。
-
-### 多集群应用
-
-在 v2.5 之前的 Rancher 中,多集群应用功能用于跨集群部署应用。多集群应用功能已弃用,但仍可作为旧版功能使用。
-
-详情请参阅[此文档](multi-cluster-apps.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md
deleted file mode 100644
index def223a8337..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: 使用 Feet 进行持续交付
----
-
-使用 Fleet 的持续交付是大规模的 GitOps。你可以使用 Fleet 管理多达一百万个集群。Fleet 非常轻量,可以很好地用于[单个集群](https://fleet.rancher.io/installation#default-install),但是在你达到[大规模](https://fleet.rancher.io/installation#configuration-for-multi-cluster)时,它能发挥更强的实力。此处的大规模指的是大量集群、大量部署、或组织中存在大量团队的情况。
-
-Fleet 是一个独立于 Rancher 的项目,你可以使用 Helm 将它安装在任何 Kubernetes 集群上。
-
-
-## 架构
-
-有关 Fleet 工作原理的信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/architecture.md)。
-
-## 在 Rancher UI 中访问 Fleet
-
-Fleet 预装在 Rancher 中,通过 Rancher UI 中的**持续交付**选项管理。有关持续交付和 Fleet 故障排除技巧的更多信息,请参阅[此处](https://fleet.rancher.io/troubleshooting)。
-
-用户可以通过遵循 **gitops** 的实践,利用持续交付将应用部署到 git 仓库中的 Kubernetes 集群,而无需任何手动操作。
-
-按照以下步骤在 Rancher UI 中访问持续交付:
-
-1. 单击 **☰ > 持续交付**。
-
-1. 在菜单顶部选择你的命名空间,注意以下几点:
- - 默认情况下会选中 `fleet-default`,其中包括注册到 Rancher 的所有下游集群。
- - 你可以切换到仅包含 `local` 集群的 `fleet-local`,或者创建自己的工作空间,并将集群分配和移动到该工作空间。
- - 然后,你可以单击左侧导航栏上的**集群**来管理集群。
-
-1. 单击左侧导航栏上的 **Git 仓库**将 git 仓库部署到当前工作空间中的集群中。
-
-1. 选择你的 [git 仓库](https://fleet.rancher.io/gitrepo-add)和[目标集群/集群组](https://fleet.rancher.io/gitrepo-targets)。你还可以单击左侧导航栏中的**集群组**在 UI 中创建集群组。
-
-1. 部署 git 仓库后,你可以通过 Rancher UI 监控应用。
-
-## Windows 支持
-
-有关对具有 Windows 节点的集群的支持的详细信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/windows-support.md)。
-
-
-## GitHub 仓库
-
-你可以单击此处获取 [Fleet Helm Chart](https://github.com/rancher/fleet/releases/latest)。
-
-
-## 在代理后使用 Fleet
-
-有关在代理后使用 Fleet 的详细信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md)。
-
-## Helm Chart 依赖
-
-由于用户需要完成依赖列表,因此为了成功部署具有依赖项的 Helm Chart,你必须手动运行命令(如下所列)。如果你不这样做,并继续克隆仓库并运行 `helm install`,由于依赖项将丢失,因此你的安装将失败。
-
-git 仓库中的 Helm Chart 必须在 Chart 子目录中包含其依赖项。你必须手动运行 `helm dependencies update $chart`,或在本地运行 `helm dependencies build $chart`,然后将完整的 Chart 目录提交到你的 git 仓库。请注意,你需要使用适当的参数来修改命令。
-
-## 故障排除
-
----
-* **已知问题**:Fleet git 仓库的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-安装-rancher-backup-operator) 创建的备份或恢复中。如果我们有了永久的解决方案,我们将通知社区。
-
-* **临时解决方法:**
- 默认情况下,用户定义的密文不会在 Fleet 中备份。如果执行灾难恢复或将 Rancher 迁移到新集群,则需要重新创建密文。要修改 resourceSet 以包含需要备份的其他资源,请参阅[此文档](https://github.com/rancher/backup-restore-operator#user-flow)。
-
----
-
-## 文档
-
-Fleet 文档链接:[https://fleet.rancher.io/](https://fleet.rancher.io/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
deleted file mode 100644
index 113a30ad48b..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
+++ /dev/null
@@ -1,175 +0,0 @@
----
-title: 多集群应用
----
-
-通常,大多数应用都部署在单个 Kubernetes 集群上,但有时你可能需要跨不同集群和/或项目部署同一应用的多个副本。在 Rancher 中,_多集群应用_ 指的是使用 Helm Chart 跨多个集群部署的应用。由于能够跨多个集群部署相同的应用,因此可以避免在每个集群上重复执行相同的应用配置操作而引入的人为错误。使用多集群应用,你可以通过自定义在所有项目/集群中使用相同的配置,并根据你的目标项目更改配置。由于多集群应用被视为单个应用,因此更容易管理和维护。
-
-全局应用商店中的任何 Helm Chart 都可用于部署和管理多集群应用。
-
-创建多集群应用后,你可以对全局 DNS 条目进行编程,以便更轻松地访问应用。
-
-## 先决条件
-
-### 权限
-
-要在 Rancher 中创建多集群应用,你至少需要具有以下权限之一:
-
-- 目标集群中的[项目成员角色](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色),能够创建、读取、更新和删除工作负载
-- 目标项目所在集群的[集群所有者角色](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)
-
-### 启用旧版功能
-
-由于 Rancher 2.5 已弃用多集群应用并使用 Fleet 取代它,你需要使用功能开关以启用多集群应用。
-
-1. 在左上角,单击 **☰ > 全局设置**。
-1. 单击**功能开关**。
-1. 转到 `Legacy` 功能开关并单击**激活**。
-
-## 启动多集群应用
-
-1. 在左上角,单击**☰ > 多集群应用**。
-1. 点击**启动**。
-1. 找到要启动的应用。
-1. (可选)查看来自 Helm Chart `README` 的详细描述。
-1. 在**配置选项**下输入多集群应用的**名称**。默认情况下,此名称还用于在每个[目标项目](#目标)中为多集群应用创建一个 Kubernetes 命名空间。命名空间命名为 `-`。
-1. 选择一个**模板版本**。
-1. 完成[多集群应用配置选项](#多集群应用配置选项)以及[应用配置选项](#应用配置选项)。
-1. 选择可以[与多集群应用交互](#成员)的**成员**。
-1. 添加[自定义应用配置答案](#覆盖特定项目的应用配置选项),这将更改默认应用配置答案中特定项目的配置。
-1. 查看**预览**中的文件。确认后,单击**启动**。
-
-**结果**:应用已部署到所选的命名空间。你可以从项目中查看应用状态。
-
-## 多集群应用配置选项
-
-Rancher 将多集群应用的配置选项分为以下几个部分。
-
-### 目标
-
-在**目标**部分中,选择用于部署应用的项目。项目列表仅显示你有权访问的项目。所选的每个项目都会被添加到列表中,其中显示了所选的集群名称和项目名称。要移除目标项目,单击 **-**。
-
-### 升级
-
-在**升级**部分中,选择升级应用时需要使用的升级策略。
-
-* **滚动更新(批量)**:选择此升级策略时,每次升级的应用数量取决于选择的**批量大小**和**间隔**(多少秒后才开始下一批更新)。
-
-* **同时升级所有应用**:选择此升级策略时,所有项目的所有应用都将同时升级。
-
-### 角色
-
-在**角色**中,你可以定义多集群应用的角色。通常,当用户[启动商店应用](../../../pages-for-subheaders/helm-charts-in-rancher.md)时,该用户的权限会用于创建应用所需的所有工作负载/资源。
-
-多集群应用由 _系统用户_ 部署,系统用户还被指定为所有底层资源的创建者。由于实际用户可以从某个目标项目中删除,因此使用 _系统用户_ 而不是实际用户。如果实际用户从其中一个项目中删除,则该用户将不再能够管理其他项目的应用。
-
-Rancher 允许你选择**项目**或**集群**的角色选项。Rancher 将允许你根据用户的权限使用其中一个角色进行创建。
-
-- **项目** - 相当于[项目成员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。如果你选择此角色,Rancher 将检查用户是否在所有目标项目中至少具有[项目成员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)的角色。虽然用户可能没有被明确授予 _项目成员_ 角色,但如果用户是[管理员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色),则认为该用户具有所需的权限级别。
-
-- **集群** - 相当于[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)。如果你选择此角色,Rancher 将检查用户是否在所有目标项目中至少具有[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)的角色。虽然用户可能没有被明确授予 _集群所有者_ 角色,但如果用户是[管理员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md),则认为该用户具有所需的权限级别。
-
-在启动应用时,Rancher 会在启动应用之前确认你在目标项目中是否拥有这些权限。
-
-:::note
-
-某些应用(如 _Grafana_ 或 _Datadog_)需要访问特定集群级别的资源。这些应用将需要 _集群_ 角色。如果你之后发现应用需要集群角色,则可以升级多集群应用以更新角色。
-
-:::
-
-## 应用配置选项
-
-对于每个 Helm Chart,你需要输入一个必须的答案列表才能成功部署 Chart。由于 Rancher 会将答案作为 `--set` 标志传递给 Helm,因此你必须按照[使用 Helm:–set 的格式和限制](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set)中的语法规则来格式化这些答案。
-
-:::note 示例
-
-当输入的答案包含用逗号分隔的两个值(即 `abc, bcd`)时,你需要用双引号将值括起来(即 ``"abc, bcd" ``)。
-
-:::
-
-### 使用 questions.yml 文件
-
-如果你部署的 Helm Chart 包含 `questions.yml` 文件,Rancher UI 会将此文件转换成易于使用的 UI 来收集问题的答案。
-
-### 原生 Helm Chart 的键值对
-
-对于原生 Helm Chart(即来自 **Helm Stable** 或 **Helm Incubator** 应用商店或自定义 Helm Chart 仓库的 Chart),答案会在 **Answers** 中以键值对的形式提供。这些答案能覆盖默认值。
-
-### 成员
-
-默认情况下,多集群应用只能由应用的创建者管理。你可以在**成员**中添加其他用户,以便这些用户管理或查看多集群应用。
-
-1. 在**成员**搜索框中键入成员的名称,查找要添加的用户。
-
-2. 为该成员选择**访问类型**。多集群项目有三种访问类型,请仔细阅读并了解这些访问类型的含义,以了解多集群应用权限的启用方式。
-
- - **所有者**:此访问类型可以管理多集群应用的任何配置,包括模板版本、[多集群应用配置选项](#多集群应用配置选项),[应用配置选项](#应用配置选项),可以与多集群应用交互的成员,以及[自定义应用配置答案](#覆盖特定项目的应用配置选项)。由于多集群应用的创建使用与用户不同的权限集,因此多集群应用的任何 _所有者_ 都可以管理/删除[目标项目](#目标)中的应用,而不需要显式授权访问这些项目。请仅为受信任的用户配置此访问类型。
-
- - **成员**:此访问类型只能修改模板版本、[应用配置选项](#应用配置选项)和[自定义应用配置答案](#覆盖特定项目的应用配置选项)。由于多集群应用的创建使用与用户不同的权限集,因此多集群应用的任何 _成员_ 都可以修改应用,而不需要显式授权访问这些项目。请仅为受信任的用户配置此访问类型。
-
- - **只读**:此访问类型不能修改多集群应用的任何配置选项。用户只能查看这些应用。
-
- :::caution
-
- 请确保仅为受信任的用户授予 _所有者_ 或 _成员_ 访问权限,因为这些用户即使无法直接访问项目,也将自动能够管理为此多集群应用创建的应用。
-
- :::
-
-### 覆盖特定项目的应用配置选项
-
-多集群应用的主要优势之一,是能够在多个集群/项目中使用相同配置部署相同的应用。在某些情况下,你可能需要为某个特定项目使用稍微不同的配置选项,但你依然希望统一管理该应用与其他匹配的应用。此时,你可以为该项目覆盖特定的[应用配置选项](#应用配置选项),而不需要创建全新的应用。
-
-1. 在**答案覆盖**中,单击**添加覆盖**。
-
-2. 对于每个覆盖,你可以选择以下内容:
-
- - **范围**:在配置选项中选择要覆盖哪些目标项目的答案。
-
- - **问题**:选择要覆盖的问题。
-
- - **答案**:输入要使用的答案。
-
-## 升级多集群应用角色和项目
-
-- **在现有的多集群应用上更改角色**
- 多集群应用的创建者和任何具有“所有者”访问类型的用户都可以升级其**角色**。添加新角色时,我们会检查用户在所有当前目标项目中是否具有该角色。Rancher 会根据 `Roles` 字段的安装部分,相应地检查用户是否具有全局管理员、集群所有者或项目所有者的角色。
-
-- **添加/删除目标项目**
-1. 多集群应用的创建者和任何具有“所有者”访问类型的用户都添加或移除目标项目。添加新项目时,我们检查此请求的调用者是否具有多集群应用中定义的所有角色。Rancher 会检查用户是否具有全局管理员、集群所有者和项目所有者的角色。
-2. 删除目标项目时,我们不会进行这些成员资格检查。这是因为调用者的权限可能与目标项目有关,或者由于该项目已被删除导致调用者希望将该项目从目标列表中删除。
-
-
-## 多集群应用管理
-
-与同一类型的多个单独应用相比,使用多集群应用的好处之一是易于管理。你可以克隆、升级或回滚多集群应用。
-
-:::note 先决条件:
-
-`Legacy` 功能开关已启用。
-
-:::
-
-1. 在左上角,单击**☰ > 多集群应用**。
-
-2. 选择要对其执行操作的多集群应用,然后单击 **⋮**。选择以下选项之一:
-
- * **克隆**:创建另一个具有相同配置的多集群应用。通过使用此选项,你可以轻松复制多集群应用。
- * **升级**:升级多集群应用以更改某些配置。在为多集群应用执行升级时,如果你有合适的[访问类型](#成员),则可以修改[升级策略](#升级)。
- * **回滚**:将你的应用回滚到特定版本。如果你的一个或多个[目标](#目标)的多集群应用在升级后出现问题,你可以使用 Rancher 存储的多达 10 个多集群应用版本进行回滚。回滚多集群应用会恢复**所有**目标集群和项目的应用,而不仅仅是受升级问题影响的目标。
-
-## 删除多集群应用
-
-:::note 先决条件:
-
-`Legacy` 功能开关已启用。
-
-:::
-
-1. 在左上角,单击**☰ > 多集群应用**。
-
-2. 选择要删除的多集群应用,然后单击**⋮ > 删除**。删除多集群应用会删除所有目标项目中的所有应用和命名空间。
-
- :::note
-
- 不能独立删除在目标项目中为多集群应用创建的应用。只有删除多集群应用后才能删除这些应用。
-
- :::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
index 313585d60dd..f9b2b0d8dfd 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
@@ -48,5 +48,5 @@ title: 生产就绪集群检查清单
### 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 (../set-up-cloud-providers/set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
index a0ec43f76c5..c6b1ef60709 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
@@ -53,7 +53,7 @@ title: 推荐的集群架构
参考:
-* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance)
+* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.5/faq/#what-is-failure-tolerance)
* [为 Kubernetes 操作 etcd 集群的官方 Kubernetes 文档](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/)
### Worker 节点数
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
index 85bcf4b7b11..7e7479042a4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
@@ -104,7 +104,7 @@ Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集
有关大型 Kubernetes 集群的硬件建议,请参阅[构建大型集群](https://kubernetes.io/docs/setup/best-practices/cluster-large/)的官方 Kubernetes 文档。
-有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.4.0/op-guide/hardware/)。
+有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.5/op-guide/hardware/)。
## 网络要求
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
index 364a4012eee..04af6ebb911 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
@@ -19,7 +19,7 @@ kubeconfig 文件及其内容特定于各个集群。你可以从 Rancher 的**
下载 kubeconfig 文件后,你将能够使用 kubeconfig 文件及其 Kubernetes [上下文](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration)访问下游集群。
-如果管理员[关闭了 kubeconfig 令牌生成](../../../../reference-guides/about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](./authorized-cluster-endpoint.md) 存在于你的 PATH 中。
+如果管理员[关闭了 kubeconfig 令牌生成](../../../../reference-guides/about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](../../../../reference-guides/cli-with-rancher/rancher-cli.md) 存在于你的 PATH 中。
### RKE 集群的两种身份验证方法
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
index 6582e5e0f50..8ea6ecae323 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
@@ -66,7 +66,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **StatefulSet**。
1. 在**卷声明模板**选项卡上,单击**添加声明模板**。
1. 输入持久卷的名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 点击**启动**。
@@ -80,7 +80,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **⋮ > 编辑配置**,转到使用由 StorageClass 配置的存储的工作负载。
1. 在**卷声明模板**中,单击**添加声明模板**。
1. 输入持久卷名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 单击**保存**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
index a262560e90f..7b2627dc6a1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
@@ -300,7 +300,7 @@ title: 通过 AWS EC2 Auto Scaling 组使用 Cluster Autoscaler
| max-node-provision-time | "15m" | CA 等待节点配置的最长时间 |
| nodes | - | 以云提供商接受的格式设置节点组的最小、最大大小和其他配置数据。可以多次使用。格式是 `::`。 |
| node-group-auto-discovery | - | 节点组自动发现的一个或多个定义。定义表示为 `:[[=]]` |
-| estimator | - | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
+| estimator | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
| expander | "random" | 要在扩容中使用的节点组扩展器的类型。可用值:`["random","most-pods","least-waste","price","priority"]` |
| ignore-daemonsets-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 DaemonSet pod |
| ignore-mirror-pods-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 Mirror pod |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
index f8e5d94e510..69081a92dfb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
@@ -169,12 +169,12 @@ Rancher 在 Kubernetes 之上进行了扩展,除了集群级别之外,还允
### 4. 可选:添加资源配额
-资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
要添加资源配额:
1. 在**资源配额**选项卡中,单击**添加资源**。
-1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
1. 输入**项目限制**和**命名空间默认限制**的值。
1. **可选**:指定**容器默认资源限制**,这将应用于项目中启动的所有容器。如果资源配额设置了 CPU 或内存限制,则建议使用该参数。可以在单个命名空间或容器级别上覆盖它。有关详细信息,请参阅[容器默认资源限制](../../../pages-for-subheaders/manage-project-resource-quotas.md)。
1. 单击**创建**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md
index 1c8aeba0767..3eb48ae9857 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/how-to-guides/new-user-guides/manage-namespaces.md
@@ -21,7 +21,7 @@ title: 命名空间
:::note
-如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](manage-namespaces.md),以确保你有权访问该命名空间。
+如果你使用 `kubectl` 创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](#创建命名空间),以确保你有权访问该命名空间。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/cluster-api/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/cluster-api/overview.md
index 5da4470bfd6..531464d5d9b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/cluster-api/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/cluster-api/overview.md
@@ -185,7 +185,7 @@ stringData:
:::note
-请记住,如果使用此安装选项,你必须自行管理 CAPI Operator 的安装。你可以参照 Rancher Turtles 文档中的 [CAPI Operator 指南](https://turtles.docs.rancher.com/tasks/capi-operator/intro)
+请记住,如果使用此安装选项,你必须自行管理 CAPI Operator 的安装。你可以参照 Rancher Turtles 文档中的 [CAPI Operator 指南](https://turtles.docs.rancher.com/contributing/install_capi_operator)
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/integrations-in-rancher.mdx b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/integrations-in-rancher.mdx
deleted file mode 100644
index c0a824cc9da..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/integrations-in-rancher.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: Rancher 中的集成
----
-
-
-
-
-
-import { Card, CardSection } from "@site/src/components/CardComponents";
-import { RocketRegular } from "@fluentui/react-icons";
-
-Prime 是 Rancher 生态系统的企业级产品,具有更高的安全性、更长的生命周期和对 Prime 专有文档的访问权限。Rancher Prime 安装资产托管在受信任的 SUSE 注册表上,由 Rancher 拥有和管理。受信任的 Prime 注册表仅包括经过社区测试的稳定版本。
-
-Prime 还提供生产支持选项,以及根据你的商业需求定制的订阅附加组件。
-
-要了解更多信息并开始使用 Rancher Prime,请访问[本页](https://www.rancher.com/quick-start)。
-
- }>
-
-
-
-
-
-
-
-
-
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/istio/cpu-and-memory-allocations.md
index 3a19c21dcc5..657baa84fd8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/istio/cpu-and-memory-allocations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/integrations-in-rancher/istio/cpu-and-memory-allocations.md
@@ -41,7 +41,7 @@ Kubernetes 中的资源请求指的是,除非该节点至少具有指定数量
1. 在左侧导航栏中,点击 **Apps**。
1. 点击**已安装的应用**。
1. 转到 `istio-system` 命名空间。在某个 Istio 工作负载中(例如 `rancher-istio`),点击**⋮ > 编辑/升级**。
-1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](cpu-and-memory-allocations.md#编辑覆盖文件)。
+1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](#编辑覆盖文件)。
1. 更改 CPU 或内存分配、调度各个组件的节点,或节点容忍度。
1. 点击**升级**。然后,更改就能启用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md
index 5c925f124ed..0b208d527d4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/checklist-for-production-ready-clusters.md
@@ -44,5 +44,5 @@ title: 生产就绪集群检查清单
### 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 [Kubernetes Cloud Provider](set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md
deleted file mode 100644
index 40e099ac09a..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/deploy-apps-across-clusters.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: 跨集群部署应用
----
-
-
-Rancher 2.5 引入了 Fleet,这是一种跨集群部署应用的新方式。
-
-使用 Fleet 的持续交付是大规模的 GitOps。如需更多信息,请参阅 [Fleet](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md)。
-
-### 多集群应用
-
-在 2.5 之前的 Rancher 版本中,多集群应用功能用于跨集群部署应用。我们已弃用多集群应用功能,但你仍然可以在 Rancher 2.5 中使用该功能。
-
-详情请参阅[此文档](../how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md
index fc356dfda17..fc57fb2b168 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/pages-for-subheaders/k3s-hardening-guide.md
@@ -85,7 +85,7 @@ The `rancher-restricted` template is provided by Rancher to enforce the highly-r
-K3s v1.24 and older support [Pod Security Policy (PSP)](https://v1-24.docs.kubernetes.io/docs/concepts/security/pod-security-policy/) for controlling pod security.
+K3s v1.24 and older support [Pod Security Policy (PSP)](https://github.com/kubernetes/website/blob/release-1.24/content/en/docs/concepts/security/pod-security-policy.md) for controlling pod security.
You can enable PSPs by passing the following flags in the cluster configuration in Rancher:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
index 0c47b75fa06..793ed566db2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
@@ -14,7 +14,7 @@ title: Rancher 运行技巧
不要在安装了 Rancher 的 Kubernetes 集群上运行其他工作负载或微服务。
### 确保 Kubernetes 节点配置正确
-在部署节点时,请遵循 K8s 和 etcd 的最佳实践,其中包括禁用 swap,检查集群中的所有主机之间是否有良好的网络连接,为每个节点使用唯一的主机名、MAC 地址和 `product_uuids`,检查所需端口是否已经打开,并使用配置 SSD 的 etcd 进行部署。详情请参见 [kubernetes 官方文档](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin)和 [etcd 性能操作指南](https://etcd.io/docs/v3.4/op-guide/performance/)。
+在部署节点时,请遵循 K8s 和 etcd 的最佳实践,其中包括禁用 swap,检查集群中的所有主机之间是否有良好的网络连接,为每个节点使用唯一的主机名、MAC 地址和 `product_uuids`,检查所需端口是否已经打开,并使用配置 SSD 的 etcd 进行部署。详情请参见 [kubernetes 官方文档](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin)和 [etcd 性能操作指南](https://etcd.io/docs/v3.5/op-guide/performance/)。
### 使用 RKE 时:备份状态文件(Statefile)
RKE 将集群状态记录在一个名为 `cluster.rkestate` 的文件中,该文件对集群的恢复和/或通过 RKE 维护集群非常重要。由于这个文件包含证书材料,我们强烈建议在备份前对该文件进行加密。请在每次运行 `rke up` 后备份状态文件。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
index 8d5fc725d36..8e98f63729f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
@@ -56,6 +56,6 @@ Rancher 的大部分逻辑都发生在事件处理程序上。每当更新对象
与 Rancher 版本类似,我们建议让你的 kubernetes 集群保持使用最新版本。这将确保你的集群能包含可用的性能增强或错误修复。
### 优化 ETCD
-[ETCD 性能](https://etcd.io/docs/v3.4/op-guide/performance/)的两个主要瓶颈是磁盘速度和网络速度。对任何一个进行优化都应该能提高性能。有关 ETCD 性能的信息,请参阅 [etcd 性能慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装调优 etcd](https://docs.ranchermanager.rancher.io/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs)。有关磁盘的信息,你也可以参阅[我们的文档](https://docs.Ranchermanager.Rancher.io/v2.5/pages-for-subheaders/installation-requirements#disks)。
+[ETCD 性能](https://etcd.io/docs/v3.5/op-guide/performance/)的两个主要瓶颈是磁盘速度和网络速度。对任何一个进行优化都应该能提高性能。有关 ETCD 性能的信息,请参阅 [etcd 性能慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装调优 etcd](https://docs.ranchermanager.rancher.io/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs)。有关磁盘的信息,你也可以参阅[我们的文档](https://docs.ranchermanager.rancher.io/v2.5/pages-for-subheaders/installation-requirements#disks)。
理论上,ETCD 集群中的节点越多,由于复制要求 [source](https://etcd.io/docs/v3.3/faq),它就会越慢。这可能与常见的缩放方法相悖。我们还可以推断,ETCD 的性能将受到节点间距离的反面影响,因为这将减慢网络通信。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
index 552e79ec76b..154b01c5ad4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
@@ -110,7 +110,7 @@ Rancher 的大部分逻辑发生在 Event Handler 上。每当资源对象产生
Etcd 是 Kubernetes 和 Rancher 的后端数据库,在 Rancher 性能中扮演重要的角色。
-[Etcd 性能](https://etcd.io/docs/v3.4/op-guide/performance/)的两个主要瓶颈是磁盘和网络速度。Etcd 应当在具有高速网络和高读写速度 (IOPS) SSD 硬盘的专用节点上运行。有关 etcd 性能的更多信息,请参阅 [etcd 性能缓慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装进行 etcd 调优](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)。有关磁盘的信息可以在[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#磁盘)中找到。
+[Etcd 性能](https://etcd.io/docs/v3.5/op-guide/performance/)的两个主要瓶颈是磁盘和网络速度。Etcd 应当在具有高速网络和高读写速度 (IOPS) SSD 硬盘的专用节点上运行。有关 etcd 性能的更多信息,请参阅 [etcd 性能缓慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装进行 etcd 调优](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)。有关磁盘的信息可以在[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#磁盘)中找到。
根据 etcd 的[复制机制](https://etcd.io/docs/v3.5/faq/#what-is-maximum-cluster-size),建议在三个节点上运行 etcd,运行在更多的节点上反而会降低速度。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index 3f4328512ae..96e2cc56603 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -140,13 +140,13 @@ Rancher 与以下开箱即用的网络提供商兼容:
#### CoreDNS
-默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking#coredns)。
+默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#coredns)。
#### NGINX Ingress
-如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
-有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
#### Metrics Server
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
index d40edd47e96..d3c891e156c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
@@ -102,7 +102,7 @@ Rancher 提供了 `rancher-restricted` 模板,用于强制执行高度限制
-K3s v1.24 及更早版本支持 [Pod 安全策略 (PSP)](https://v1-24.docs.kubernetes.io/docs/concepts/security/pod-security-policy/) 以控制 Pod 安全性。
+K3s v1.24 及更早版本支持 [Pod 安全策略 (PSP)](https://github.com/kubernetes/website/blob/release-1.24/content/en/docs/concepts/security/pod-security-policy.md) 以控制 Pod 安全性。
你可以在 Rancher 中通过集群配置,传递以下标志来启用 PSPs:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md
index c39522aaa1b..567869ba1c6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/reference-guides/rancher-security/security-advisories-and-cves.md
@@ -27,10 +27,10 @@ Rancher 致力于向社区披露我们产品的安全问题。我们会针对已
| [CVE-2022-31247](https://github.com/rancher/rancher/security/advisories/GHSA-6x34-89p7-95wg) | 在 Rancher 2.5.15 和 2.6.6 及之前的版本中发现了一个问题。授权逻辑缺陷允许在下游集群中通过集群角色模板绑定 (CRTB) 和项目角色模板绑定 (PRTB) 来提升权限。任何有权限创建/编辑 CRTB 或 PRTB 的用户(例如 `cluster-owner`、`manage cluster members`、`project-owner` 和 `manage project members`)都可以利用该漏洞,在同一集群的另一个项目或不同下游集群的另一个项目中获得所有者权限。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
| [CVE-2021-36783](https://github.com/rancher/rancher/security/advisories/GHSA-8w87-58w6-hfv8) | 2.5.12 到 2.6.3 的 Rancher 版本无法正确清理集群模板 answer 中的凭证。此错误可能会导致明文存储以及凭证、密码和 API 令牌被暴露。在 Rancher 中,已认证的 `Cluster Owner`、`Cluster Member`、`Project Owner` 和 `Project Member` 可以在 `/v1/management.cattle.io.clusters`、`/v3/clusters` 和 `/k8s/clusters/local/apis/management.cattle.io/v3/clusters` 端点上看到暴露的凭证。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
| [CVE-2021-36782](https://github.com/rancher/rancher/security/advisories/GHSA-g7j7-h4q8-8w2f) | 在 2.5.15 到 2.6.6 的 Rancher 版本中发现了一个问题,其中密码、API 密钥和 Rancher 的 ServiceAccount 令牌(用于配置集群)等敏感字段直接以明文形式存储在 `Cluster` 等 Kubernetes 对象上(例如,`cluster.management.cattle.io`)。任何能够读取 Kubernetes API 中的对象的用户都可以检索这些敏感数据的明文版本。该问题由 Florian Struck(来自 [Continum AG](https://www.continum.net/))和 [Marco Stuurman](https://github.com/fe-ax)(来自 [Shock Media B.V.](https://www.shockmedia.nl/))发现并报告。 | 2022 年 8 月 18 日 | [Rancher 2.6.7](https://github.com/rancher/rancher/releases/tag/v2.6.7) 和 [Rancher 2.5.16](https://github.com/rancher/rancher/releases/tag/v2.5.16) |
-| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | 此漏洞仅影响通过 [RKE 模板](../../pages-for-subheaders/about-rke1-templates.md)配置 [Weave](../../faq/container-network-interface-providers.md#weave) 容器网络接口 (CNI) 的客户。在 Rancher 2.5.0 到 2.5.13 和 Rancher 2.6.0 到 2.6.4 版本中发现了一个漏洞。如果将 CNI 选为 Weave,RKE 模板的用户界面 (UI) 不包括 Weave 密码的值。如果基于上述模板创建集群,并且将 Weave 配置为 CNI,则 Weave 中不会为[网络加密](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/)创建密码。因此,集群中的网络流量将不加密发送。 | 2022 年 5 月 24 日 | [Rancher 2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) 和 [Rancher 2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) |
+| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | 此漏洞仅影响通过 [RKE 模板](../../pages-for-subheaders/about-rke1-templates.md)配置 [Weave](../../faq/container-network-interface-providers.md#weave) 容器网络接口 (CNI) 的客户。在 Rancher 2.5.0 到 2.5.13 和 Rancher 2.6.0 到 2.6.4 版本中发现了一个漏洞。如果将 CNI 选为 Weave,RKE 模板的用户界面 (UI) 不包括 Weave 密码的值。如果基于上述模板创建集群,并且将 Weave 配置为 CNI,则 Weave 中不会为[网络加密](https://github.com/weaveworks/weave/blob/master/site/tasks/manage/security-untrusted-networks.md)创建密码。因此,集群中的网络流量将不加密发送。 | 2022 年 5 月 24 日 | [Rancher 2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) 和 [Rancher 2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) |
| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | 在 Rancher 2.5.0 到 2.5.12 和 Rancher 2.6.0 到 2.6.3 中发现了一个漏洞,该漏洞允许能创建或更新[全局角色](../../pages-for-subheaders/manage-role-based-access-control-rbac.md)的用户将他们或其他用户升级为管理员。全局角色能授予用户 Rancher 级别的权限,例如能创建集群。在已识别的 Rancher 版本中,如果用户被授予了编辑或创建全局角色的权限,他们不仅仅能授予他们已经拥有的权限。此漏洞影响使用能够创建或编辑全局角色的非管理员用户的客户。此场景最常见的用例是 `restricted-admin` 角色。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
| [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | 此漏洞仅影响在 Rancher 中使用 `restricted-admin` 角色的客户。在 Rancher 2.5.0 到 2.5.12 和 2.6.0 到 2.6.3 中发现了一个漏洞,其中 `cattle-global-data` 命名空间中的 `global-data` 角色授予了应用商店的写权限。由于具有任何级别的应用商店访问权限的用户都会绑定到 `global-data` 角色,因此这些用户都能写入模板 (`CatalogTemplates`) 和模板版本 (`CatalogTemplateVersions`)。在 Rancher 中创建的新用户默认分配到 `user` 角色(普通用户),该角色本不该具有写入应用商店的权限。此漏洞提升了能写入应用商店模板和应用商店模板版本资源的用户的权限。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
-| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | 此漏洞仅影响使用经过认证的 Git 和/或 Helm 仓库通过 [Fleet](../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) 进行持续交付的客户。在 [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) 之前版本中的 `go-getter` 库中发现了一个问题,错误消息中没有删除 Base64 编码的 SSH 私钥,导致该信息暴露。Rancher 中 [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9) 之前的 Fleet 版本使用了该库的漏洞版本。此问题影响 Rancher 2.5.0 到 2.5.12(包括 2.5.12)以及 2.6.0 到 2.6.3(包括 2.6.3)。该问题由 Raft Engineering 的 Dagan Henderson 发现并报告。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
+| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | 此漏洞仅影响使用经过认证的 Git 和/或 Helm 仓库通过 [Fleet](../../integrations-in-rancher/fleet-gitops-at-scale/fleet-gitops-at-scale.md) 进行持续交付的客户。在 [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) 之前版本中的 `go-getter` 库中发现了一个问题,错误消息中没有删除 Base64 编码的 SSH 私钥,导致该信息暴露。Rancher 中 [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9) 之前的 Fleet 版本使用了该库的漏洞版本。此问题影响 Rancher 2.5.0 到 2.5.12(包括 2.5.12)以及 2.6.0 到 2.6.3(包括 2.6.3)。该问题由 Raft Engineering 的 Dagan Henderson 发现并报告。 | 2022 年 4 月 14 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) 和 [Rancher 2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) |
| [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | 在 Rancher 2.5.0 到 2.5.11 和 Rancher 2.6.0 到 2.6.2 中发现了一个漏洞,当从配置的私有仓库下载 Helm Chart 时,对同源策略的检查不足可能导致仓库凭证暴露给第三方提供商。仅当用户在 Rancher 的`应用 & 应用市场 > 仓库`中配置私有仓库的访问凭证时才会出现此问题。该问题由 Martin Andreas Ullrich 发现并报告。 | 2022 年 4 月 14 日 | [Rancher 2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) 和 [Rancher 2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) |
| [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | 在 Rancher 2.0 到 2.6.3 中发现了一个漏洞。Rancher 提供的 `restricted` Pod 安全策略(PSP)与 Kubernetes 提供的上游 `restricted` 策略有差别,因此 Rancher 的 PSP 将 `runAsUser` 设置为 `runAsAny`,而上游将 `runAsUser` 设置为 `MustRunAsNonRoot`。因此,即使 Rancher 的 `restricted` 策略是在项目或集群级别上强制执行的,容器也可以以任何用户身份运行,包括特权用户 (`root`)。 | 2022 年 3 月 31 日 | [Rancher 2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) |
| [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | 在 Rancher 2.4.17、2.5.11 和 2.6.2 以及更高的版本中发现了一个漏洞。从项目中删除与某个组关联的`项目角色`后,能让这些使用者访问集群级别资源的绑定(Binding)不会被删除。导致问题的原因是不完整的授权逻辑检查。如果用户是受影响组中的成员,且能对 Rancher 进行认证访问,那么用户可以利用此漏洞访问他们不应该能访问的资源。暴露级别取决于受影响项目角色的原始权限级别。此漏洞仅影响在 Rancher 中基于组进行身份验证的客户。 | 2022 年 3 月 31 日 | [Rancher 2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3)、[Rancher 2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) 和 [Rancher 2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/security/security-scan/security-scan.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/security/security-scan/security-scan.md
deleted file mode 100644
index 32cf9e17a4c..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/security/security-scan/security-scan.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: 安全扫描
----
-
-CIS 安全扫描的文档已移至[此处](../../pages-for-subheaders/cis-scan-guides.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md
index 47c806b0278..32bdd10c27c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/troubleshooting/other-troubleshooting-tips/networking.md
@@ -102,20 +102,3 @@ title: 网络
* `read tcp: i/o timeout`
有关在 Rancher 和集群节点之间使用 Google Cloud VPN 时如何正确配置 MTU 的示例,请参阅 [Google Cloud VPN:MTU 注意事项](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu)。
-
-### 已解决的问题
-
-#### 由于缺少节点注释,使用 Canal/Flannel 时覆盖网络中断
-
-| | |
-|------------|------------|
-| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) |
-| 解决于 | v2.1.2 |
-
-要检查你的集群是否受到影响,运行以下命令来列出损坏的节点(此命令要求安装 `jq`):
-
-```
-kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name'
-```
-
-如果没有输出,则集群没有影响。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/v2.7.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.7/v2.7.md
deleted file mode 100644
index 4e36ad8730b..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.7/v2.7.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: v2.7
-description: Dummy file used to redirect to the base url
----
-
-
-
-import {Redirect} from '@docusaurus/router';
-
-const Home = () => {
-return ;
-};
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/about-the-api/api-tokens.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/api/api-tokens.md
similarity index 91%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/about-the-api/api-tokens.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.8/api/api-tokens.md
index 59b103fa476..e730cbb3d22 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/about-the-api/api-tokens.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/api/api-tokens.md
@@ -40,7 +40,7 @@ title: API 令牌
### 在生成的 Kubeconfig 中禁用令牌
-1. 将 `kubeconfig-generate-token` 设置为 `false`。此设置让 Rancher 不再在用户单击下载 kubeconfig 文件时自动生成令牌。如果停用此设置,生成的 kubeconfig 将引用 [Rancher CLI](../cli-with-rancher/kubectl-utility.md#使用-kubectl-和-kubeconfig-令牌进行-ttl-认证) 来检索集群的短期令牌。当这个 kubeconfig 在客户端(例如 `kubectl`)中使用时,你需要安装 Rancher CLI 来完成登录请求。
+1. 将 `kubeconfig-generate-token` 设置为 `false`。此设置让 Rancher 不再在用户单击下载 kubeconfig 文件时自动生成令牌。如果停用此设置,生成的 kubeconfig 将引用 [Rancher CLI](../reference-guides/cli-with-rancher/kubectl-utility.md#使用-kubectl-和-kubeconfig-令牌进行-ttl-认证) 来检索集群的短期令牌。当这个 kubeconfig 在客户端(例如 `kubectl`)中使用时,你需要安装 Rancher CLI 来完成登录请求。
2. 将 `kubeconfig-token-ttl-minutes` 设置为所需的时长(单位:分钟)。`kubeconfig-token-ttl-minutes` 默认设置为 960(即 16 小时)。
@@ -48,7 +48,7 @@ title: API 令牌
你可以启用令牌哈希,令牌将使用 SHA256 算法进行单向哈希。这是一个不可逆的操作,一旦启用,此功能将无法禁用。在启用功能或在测试环境中评估之前,建议你先进行备份。
-要启用令牌哈希,请参阅[本节](../../pages-for-subheaders/enable-experimental-features.md)。
+要启用令牌哈希,请参阅[本节](../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
此功能将影响所有令牌,包括但不限于以下内容:
@@ -83,4 +83,4 @@ title: API 令牌
**2.6.6 版本更改:适用于所有 kubeconfig 令牌和 API 令牌。**
#### kubeconfig-generate-token
-如果设置为 true,则通过 UI 请求的 kubeconfig 将包含一个有效的令牌。如果设置为 false,kubeconfig 将包含一个使用 Rancher CLI 提示用户登录的命令。然后,[CLI 将为用户检索和缓存令牌](../cli-with-rancher/kubectl-utility.md#使用-kubectl-和-kubeconfig-令牌进行-ttl-认证)。
+如果设置为 true,则通过 UI 请求的 kubeconfig 将包含一个有效的令牌。如果设置为 false,kubeconfig 将包含一个使用 Rancher CLI 提示用户登录的命令。然后,[CLI 将为用户检索和缓存令牌](../reference-guides/cli-with-rancher/kubectl-utility.md#使用-kubectl-和-kubeconfig-令牌进行-ttl-认证)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-the-api.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/api/v3-rancher-api-guide.md
similarity index 84%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-the-api.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.8/api/v3-rancher-api-guide.md
index db0ad233862..89872d72845 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-the-api.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/api/v3-rancher-api-guide.md
@@ -2,6 +2,10 @@
title: API
---
+
+
+
+
## 如何使用 API
API 有自己的用户界面,你可以从 Web 浏览器访问它。这是查看资源、执行操作以及查看等效 cURL 或 HTTP 请求和响应的一种简单的方法。要访问它:
@@ -21,11 +25,11 @@ API 有自己的用户界面,你可以从 Web 浏览器访问它。这是查
-## 身份验证
+## 认证
-API 请求必须包含身份验证信息。身份验证是通过 [API 密钥](../reference-guides/user-settings/api-keys.md)使用 HTTP 基本身份验证完成的。API 密钥可以创建新集群并通过 `/v3/clusters/` 访问多个集群。[集群和项目角色](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)会应用于这些键,并限制账号可以查看的集群和项目以及可以执行的操作。
+API 请求必须包含认证信息。认证是通过 [API 密钥](../reference-guides/user-settings/api-keys.md)使用 HTTP 基本认证完成的。API 密钥可以创建新集群并通过 `/v3/clusters/` 访问多个集群。[集群和项目角色](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)会应用于这些键,并限制账号可以查看的集群和项目以及可以执行的操作。
-默认情况下,某些集群级别的 API 令牌是使用无限期 TTL(`ttl=0`)生成的。换言之,除非你让令牌失效,否则 `ttl=0` 的 API 令牌永远不会过期。有关如何使 API 令牌失效的详细信息,请参阅 [API 令牌](../reference-guides/about-the-api/api-tokens.md)。
+默认情况下,某些集群级别的 API 令牌是使用无限期 TTL(`ttl=0`)生成的。换言之,除非你让令牌失效,否则 `ttl=0` 的 API 令牌永远不会过期。有关如何使 API 令牌失效的详细信息,请参阅 [API 令牌](api-tokens.md)。
## 发出请求
@@ -78,3 +82,12 @@ API 请求必须包含身份验证信息。身份验证是通过 [API 密钥](..
1. 在 Rancher UI 中,单击**创建**。在开发者工具中,你应该会看到一个名为 `cluster?_replace=true` 的新网络请求。
1. 右键单击 `cluster?_replace=true` 并单击**复制 > 复制为 cURL**。
1. 将结果粘贴到文本编辑器中。你将能够看到 POST 请求,包括被发送到的 URL、所有标头以及请求的完整正文。此命令可用于从命令行创建集群。请注意,请求包含凭证,因此请将请求存储在安全的地方。
+
+### 启用在 API 中查看
+
+你还可以查看针对各自集群和资源捕获的 Rancher API 调用。默认情况下不启用此功能。要启用它:
+
+1. 单击 UI 右上角的 **用户图标**,然后从下拉菜单中选择 **偏好设置**
+1. 在**高级功能**部分下,单击**启用"在 API 中查看"**
+
+选中后,**在 API 中查看**链接现在将显示在 UI 资源页面上的 **⋮** 子菜单下。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/backups/docker-installs/docker-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/backups/docker-installs/docker-installs.md
deleted file mode 100644
index 51c3001d777..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/backups/docker-installs/docker-installs.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: 备份和恢复 Docker 安装的 Rancher
----
-
-- [备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)
-- [还原](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/cluster-provisioning/rke-clusters/options/options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/cluster-provisioning/rke-clusters/options/options.md
deleted file mode 100644
index 39c332461ce..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/cluster-provisioning/rke-clusters/options/options.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: RKE 集群配置
----
-
-本文已迁移到[此处](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/container-network-interface-providers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/container-network-interface-providers.md
index ad796bb5c4c..0d9c93e18fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/container-network-interface-providers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/container-network-interface-providers.md
@@ -90,7 +90,7 @@ Kubernetes worker 需要打开 TCP 端口 `6783`(控制端口)、UDP 端口
有关详细信息,请参阅以下页面:
-- [Weave Net 官网](https://www.weave.works/)
+- [Weave Net 官网](https://github.com/weaveworks/weave/blob/master/site/overview.md)
### RKE2 Kubernetes 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features-in-v2.5.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features-in-v2.5.md
index ec1898663b2..0e11d4e793a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features-in-v2.5.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features-in-v2.5.md
@@ -2,11 +2,11 @@
title: Rancher 弃用的功能
---
-### Rancher 的弃用策略是什么?
+## Rancher 的弃用策略是什么?
我们在支持[服务条款](https://rancher.com/support-maintenance-terms)中发布了官方弃用策略。
-### 在哪里可以找到 Rancher 已弃用的功能?
+## 在哪里可以找到 Rancher 已弃用的功能?
Rancher 会在 GitHub 上的[发行说明](https://github.com/rancher/rancher/releases)中公布已弃用的功能。请参阅以下补丁版本了解已弃用的功能:
@@ -20,7 +20,6 @@ Rancher 会在 GitHub 上的[发行说明](https://github.com/rancher/rancher/re
| [2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) | 2022 年 5 月 12 日 |
| [2.6.6](https://github.com/rancher/rancher/releases/tag/v2.6.6) | 2022 年 6 月 30 日 |
+## 如果某个功能标记为弃用,我要怎么做?
-### 如果某个功能标记为弃用,我要怎么做?
-
-如果某个发行版将某功能标记为"Deprecated"(已弃用),该功能仍然可用并受支持,从而允许用户按照常规流程进行升级。在升级到该功能被标记为"已删除"的发行版前,用户/管理员应该计划剥离该功能。对于新部署,我们建议不要使用已弃用的功能。
\ No newline at end of file
+如果某个发行版将某功能标记为"Deprecated"(已弃用),该功能仍然可用并受支持,从而允许用户按照常规流程进行升级。在升级到该功能被标记为"已删除"的发行版前,用户/管理员应该计划剥离该功能。对于新部署,我们建议不要使用已弃用的功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features.md
index b594bd0a42b..121efe550dd 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/deprecated-features.md
@@ -6,11 +6,11 @@ title: Rancher 中已弃用的功能
-### Rancher 的弃用策略是什么?
+## Rancher 的弃用策略是什么?
我们已经在支持的[服务条款](https://rancher.com/support-maintenance-terms)中发布了官方的弃用策略。
-### 在哪里可以了解 Rancher 中已弃用哪些功能?
+## 在哪里可以了解 Rancher 中已弃用哪些功能?
Rancher 将在 GitHub 上发布的 Rancher 的[发版说明](https://github.com/rancher/rancher/releases)中发布已弃用的功能。有关已弃用的功能,请参阅以下的补丁版本:
@@ -21,6 +21,6 @@ Rancher 将在 GitHub 上发布的 Rancher 的[发版说明](https://github.com/
| [2.8.1](https://github.com/rancher/rancher/releases/tag/v2.8.1) | 2024 年 1 月 22 日 |
| [2.8.0](https://github.com/rancher/rancher/releases/tag/v2.8.0) | 2023 年 12 月 6 日 |
-### 当一个功能被标记为弃用我可以得到什么样的预期?
+## 当一个功能被标记为弃用我可以得到什么样的预期?
当功能被标记为“已弃用”时,它依然可用并得到支持,允许按照常规的流程进行升级。一旦升级完成,用户/管理员应开始计划在升级到标记为已移除的版本之前放弃使用已弃用的功能。对于新的部署,建议不要使用已弃用的功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/dockershim.md
index cfab0dfbaf4..cb1d658b131 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/dockershim.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/dockershim.md
@@ -14,19 +14,19 @@ enable_cri_dockerd: true
如果你想使用其他容器运行时,Rancher 也提供使用 Containerd 作为默认运行时的,以边缘为中心的 K3s,和以数据中心为中心的 RKE2 Kubernetes 发行版。即使在 Kubernetes 1.24 删除了树内 Dockershim 之后,你也可以通过 Rancher 升级和管理导入的 RKE2 和 K3s Kubernetes 集群。
-### 常见问题
+## 常见问题
-Q. 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
+Q: 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
对于 RKE,Dockershim 的上游支持从 Kubernetes 1.21 开始。你需要使用 Rancher 2.6 或更高版本才能获取使用 Kubernetes 1.21 的 RKE 的支持。详情请参阅我们的[支持矩阵](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/)。
-Q. 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
+Q: 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
-A. 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
+A: 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/general-faq.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/general-faq.md
index 5cf116534af..cb1fc3858b9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/general-faq.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/general-faq.md
@@ -16,7 +16,7 @@ title: 一般常见问题解答
## 是否可以使用 Rancher 2.x 管理 Azure Kubernetes 服务?
-是的。请参阅我们的[集群管理]((../how-to-guides/new-user-guides/manage-clusters/manage-clusters.md))指南,了解 AKS 上可用的 Rancher 功能,以及相关的 [AKS 的文档](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)
+是的。请参阅我们的[集群管理](../how-to-guides/new-user-guides/manage-clusters/manage-clusters.md)指南,了解 AKS 上可用的 Rancher 功能,以及相关的 [AKS 的文档](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)
## Rancher 是否支持 Windows?
@@ -24,7 +24,7 @@ Rancher 支持 Windows Server 1809 容器。有关如何使用 Windows Worker
## Rancher 是否支持 Istio?
-Rancher 支持 [Istio](../pages-for-subheaders/istio.md)。
+Rancher 支持 [Istio](../integrations-in-rancher/istio/istio.md)。
## Rancher 2.x 是否支持使用 Hashicorp 的 Vault 来存储密文?
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/install-and-configure-kubectl.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/install-and-configure-kubectl.md
index 21c301639b6..2b9764b84f2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/install-and-configure-kubectl.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/install-and-configure-kubectl.md
@@ -4,11 +4,11 @@ title: 安装和配置 kubectl
`kubectl` 是一个 CLI 工具,用于运行 Kubernetes 集群相关的命令。Rancher 2.x 中的许多维护和管理任务都需要它。
-### 安装
+## 安装
请参阅 [kubectl 安装](https://kubernetes.io/docs/tasks/tools/install-kubectl/)将 kubectl 安装到你的操作系统上。
-### 配置
+## 配置
使用 RKE 创建 Kubernetes 集群时,RKE 会在本地目录中创建一个 `kube_config_cluster.yml`,该文件包含使用 `kubectl` 或 `helm` 等工具连接到新集群的凭证。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/rancher-is-no-longer-needed.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/rancher-is-no-longer-needed.md
index ffb98927c69..0ae8e7be37b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/rancher-is-no-longer-needed.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/rancher-is-no-longer-needed.md
@@ -4,20 +4,19 @@ title: 卸载 Rancher
本文介绍了如果你不再需要 Rancher、不想再由 Rancher 管理集群、或想删除 Rancher Server 需要怎么做。
-
-### 如果 Rancher Server 被删除,下游集群中的工作负载会怎样?
+## 如果 Rancher Server 被删除,下游集群中的工作负载会怎样?
如果 Rancher 删除了或无法恢复,Rancher 管理的下游 Kubernetes 集群中的所有工作负载将继续正常运行。
-### 如果删除了 Rancher Server,该如何访问下游集群?
+## 如果删除了 Rancher Server,该如何访问下游集群?
如果删除了 Rancher,访问下游集群的方式取决于集群的类型和集群的创建方式。总而言之:
- **注册集群**:集群不受影响,你可以使用注册集群前的方法访问该集群。
- **托管的 Kubernetes 集群**:如果你在 Kubernetes 云提供商(例如 EKS、GKE 或 AKS)中创建集群,你可以继续使用提供商的云凭证来管理集群。
-- **RKE 集群**:要访问 [RKE 集群](../pages-for-subheaders/launch-kubernetes-with-rancher.md),集群必须启用了[授权集群端点(authorized cluster endpoint,ACE)](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点),而且你必须从 Rancher UI 下载了集群的 kubeconfig 文件。RKE 集群默认启用授权集群端点。通过使用此端点,你可以直接使用 kubectl 访问你的集群,而不用通过 Rancher Server 的[认证代理](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-认证代理)进行通信。有关配置 kubectl 以使用授权集群端点的说明,请参阅[使用 kubectl 和 kubeconfig 文件直接访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)。这些集群将使用删除 Rancher 时配置的身份验证快照。
+- **RKE 集群**:要访问 [RKE 集群](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md),集群必须启用了[授权集群端点(authorized cluster endpoint,ACE)](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点),而且你必须从 Rancher UI 下载了集群的 kubeconfig 文件。RKE 集群默认启用授权集群端点。通过使用此端点,你可以直接使用 kubectl 访问你的集群,而不用通过 Rancher Server 的[认证代理](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-认证代理)进行通信。有关配置 kubectl 以使用授权集群端点的说明,请参阅[使用 kubectl 和 kubeconfig 文件直接访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)。这些集群将使用删除 Rancher 时配置的身份验证快照。
-### 如果我不想再使用 Rancher 了该怎么做?
+## 如果我不想再使用 Rancher 了该怎么做?
:::note
@@ -25,7 +24,7 @@ title: 卸载 Rancher
:::
-如果你[在 Kubernetes 集群上安装了 Rancher](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md),你可以使用 [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) 工具删除 Rancher。
+如果你[在 Kubernetes 集群上安装了 Rancher](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md),你可以使用 [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) 工具删除 Rancher。
在高可用 (HA) 模式下卸载 Rancher 还将删除所有 `helm-operation-*` Pod 和以下应用程序:
@@ -40,7 +39,7 @@ title: 卸载 Rancher
移除 Rancher 不会影响导入的集群。有关其他集群类型,请参考[移除 Rancher 后访问下游集群](#如果删除了-rancher-server该如何访问下游集群)。
-### 如果我不想 Rancher 管理我的注册集群该怎么办?
+## 如果我不想 Rancher 管理我的注册集群该怎么办?
如果你在 Rancher UI 中删除了已注册的集群,则该集群将与 Rancher 分离,集群不会发生改变,你可以使用注册集群之前的方法访问该集群。
@@ -52,7 +51,7 @@ title: 卸载 Rancher
**结果**:注册的集群已与 Rancher 分离,并在 Rancher 外正常运行。
-### 如果我不想 Rancher 管理我的 RKE 集群或托管的 Kubernetes 集群该怎么办?
+## 如果我不想 Rancher 管理我的 RKE 集群或托管的 Kubernetes 集群该怎么办?
目前,我们没有将这些集群从 Rancher 中分离出来的功能。在这种情况下,“分离”指的是将 Rancher 组件移除出集群,并独立于 Rancher 管理对集群的访问。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/security.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/security.md
index 0078c58eac7..805cfd72c7f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/security.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/security.md
@@ -5,10 +5,10 @@ title: 安全
**是否有强化指南?**
-强化指南现在位于[安全](../pages-for-subheaders/rancher-security.md)部分。
+强化指南现在位于[安全](../reference-guides/rancher-security/rancher-security.md)部分。
**Rancher Kubernetes 集群 CIS Benchmark 测试的结果是什么?**
-我们已经针对强化的 Rancher Kubernetes 集群运行了 CIS Kubernetes Benchmark 测试。你可以在[安全](../pages-for-subheaders/rancher-security.md)中找到该评估的结果。
+我们已经针对强化的 Rancher Kubernetes 集群运行了 CIS Kubernetes Benchmark 测试。你可以在[安全](../reference-guides/rancher-security/rancher-security.md)中找到该评估的结果。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/technical-items.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/technical-items.md
index 2bc3cfb6bfc..b781a8cebb1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/technical-items.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/technical-items.md
@@ -2,9 +2,10 @@
title: 技术
---
-### 如何重置管理员密码?
+## 如何重置管理员密码?
Docker 安装:
+
```
$ docker exec -ti reset-password
New password for default administrator (user-xxxxx):
@@ -12,6 +13,7 @@ New password for default administrator (user-xxxxx):
```
Kubernetes 安装(Helm):
+
```
$ KUBECONFIG=./kube_config_cluster.yml
$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password
@@ -19,10 +21,10 @@ New password for default administrator (user-xxxxx):
```
+## 我删除/停用了最后一个 admin,该如何解决?
-
-### 我删除/停用了最后一个 admin,该如何解决?
Docker 安装:
+
```
$ docker exec -ti ensure-default-admin
New default administrator (user-xxxxx)
@@ -31,38 +33,40 @@ New password for default administrator (user-xxxxx):
```
Kubernetes 安装(Helm):
+
```
$ KUBECONFIG=./kube_config_cluster.yml
$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin
New password for default administrator (user-xxxxx):
```
-### 如何启用调试日志记录?
+
+## 如何启用调试日志记录?
请参阅[故障排除:日志记录](../troubleshooting/other-troubleshooting-tips/logging.md)。
-### 我的 ClusterIP 不响应 ping,该如何解决?
+## 我的 ClusterIP 不响应 ping,该如何解决?
ClusterIP 是一个虚拟 IP,不会响应 ping。要测试 ClusterIP 是否配置正确,最好的方法是使用 `curl` 访问 IP 和端口并检查它是否响应。
-### 在哪里管理节点模板?
+## 在哪里管理节点模板?
打开你的账号菜单(右上角)并选择`节点模板`。
-### 为什么我的四层负载均衡器处于 `Pending` 状态?
+## 为什么我的四层负载均衡器处于 `Pending` 状态?
-四层负载均衡器创建为 `type: LoadBalancer`。Kubernetes 需要一个可以满足这些请求的云提供商或控制器,否则这些请求将永远处于 `Pending` 状态。有关更多信息,请参阅[云提供商](../pages-for-subheaders/set-up-cloud-providers.md)或[创建外部负载均衡器](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)。
+四层负载均衡器创建为 `type: LoadBalancer`。Kubernetes 需要一个可以满足这些请求的云提供商或控制器,否则这些请求将永远处于 `Pending` 状态。有关更多信息,请参阅[云提供商](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)或[创建外部负载均衡器](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)。
-### Rancher 的状态存储在哪里?
+## Rancher 的状态存储在哪里?
- Docker 安装:在 `rancher/rancher` 容器的嵌入式 etcd 中,位于 `/var/lib/rancher`。
- Kubernetes install:在为运行 Rancher 而创建的 RKE 集群的 etcd 中。
-### 支持的 Docker 版本是如何确定的?
+## 支持的 Docker 版本是如何确定的?
我们遵循上游 Kubernetes 版本验证过的 Docker 版本。如果需要获取验证过的版本,请查看 Kubernetes 版本 CHANGELOG.md 中的 [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies)。
-### 如何访问 Rancher 创建的节点?
+## 如何访问 Rancher 创建的节点?
你可以转到**节点**视图,然后下载用于访问 Rancher 创建的节点的 SSH 密钥。选择要访问的节点并单击行尾 **⋮** 按钮,然后选择**下载密钥**,如下图所示。
@@ -74,14 +78,14 @@ ClusterIP 是一个虚拟 IP,不会响应 ping。要测试 ClusterIP 是否配
$ ssh -i id_rsa user@ip_of_node
```
-### 如何在 Rancher 中自动化任务 X?
+## 如何在 Rancher 中自动化任务 X?
UI 由静态文件组成,并根据 API 的响应工作。换言之,UI 中可以执行的每个操作/任务都可以通过 API 进行自动化。有两种方法可以实现这一点:
* 访问 `https://your_rancher_ip/v3` 并浏览 API 选项。
* 在使用 UI 时捕获 API 调用(通常使用 [Chrome 开发者工具](https://developers.google.com/web/tools/chrome-devtools/#network),但你也可以使用其他工具)。
-### 节点的 IP 地址改变了,该如何恢复?
+## 节点的 IP 地址改变了,该如何恢复?
节点需要配置静态 IP(或使用 DHCP 保留的 IP)。如果节点的 IP 已更改,你必须在集群中删除并重新添加它。删除后,Rancher 会将集群更新为正确的状态。如果集群不再处于 `Provisioning` 状态,则已从集群删除该节点。
@@ -89,11 +93,11 @@ UI 由静态文件组成,并根据 API 的响应工作。换言之,UI 中可
在集群中移除并清理节点时,你可以将节点重新添加到集群中。
-### 如何将其他参数/绑定/环境变量添加到 Rancher 启动的 Kubernetes 集群的 Kubernetes 组件中?
+## 如何将其他参数/绑定/环境变量添加到 Rancher 启动的 Kubernetes 集群的 Kubernetes 组件中?
你可以使用集群选项中的[配置文件](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)选项来添加其他参数/绑定/环境变量。有关详细信息,请参阅 RKE 文档中的[其他参数、绑定和环境变量](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/),或浏览 [Cluster.ymls 示例](https://rancher.com/docs/rke/latest/en/example-yamls/)。
-### 如何检查证书链是否有效?
+## 如何检查证书链是否有效?
使用 `openssl verify` 命令来验证你的证书链:
@@ -134,7 +138,7 @@ subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com
issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA
```
-### 如何在服务器证书中检查 `Common Name` 和 `Subject Alternative Names`?
+## 如何在服务器证书中检查 `Common Name` 和 `Subject Alternative Names`?
虽然技术上仅需要 `Subject Alternative Names` 中有一个条目,但在 `Common Name` 和 `Subject Alternative Names` 中都包含主机名可以最大程度地提高与旧版浏览器/应用程序的兼容性。
@@ -152,7 +156,7 @@ openssl x509 -noout -in cert.pem -text | grep DNS
DNS:rancher.my.org
```
-### 为什么节点发生故障时重新调度一个 pod 需要 5 分钟以上的时间?
+## 为什么节点发生故障时重新调度一个 pod 需要 5 分钟以上的时间?
这是以下默认 Kubernetes 设置的组合导致的:
@@ -171,6 +175,6 @@ Kubernetes 1.13 默认启用 `TaintBasedEvictions` 功能。有关详细信息
* `default-not-ready-toleration-seconds`:表示 `notReady:NoExecute` 的容忍度的 `tolerationSeconds`,该设置默认添加到还没有该容忍度的 pod。
* `default-unreachable-toleration-seconds`:表示 `unreachable:NoExecute` 的容忍度的 `tolerationSeconds`,该设置默认添加到还没有该容忍度的 pod。
-### 我可以在 UI 中使用键盘快捷键吗?
+## 我可以在 UI 中使用键盘快捷键吗?
是的,你可以使用键盘快捷键访问 UI 的大部分内容。要查看快捷方式的概览,请在 UI 任意位置按 `?`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/telemetry.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/telemetry.md
index 400f6e839ad..8d6f997c443 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/telemetry.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/faq/telemetry.md
@@ -2,11 +2,11 @@
title: 遥测
---
-### 什么是遥测?
+## 什么是遥测?
遥测(Telemetry)收集 Rancher 安装大小、使用的组件版本以及使用功能的汇总信息。Rancher Labs 会使用此信息来改进产品,我们不会与第三方共享此信息。
-### 收集什么信息?
+## 收集什么信息?
我们不会收集任何识别信息(如用户名、密码或用户资源的名称或地址)。
@@ -20,12 +20,12 @@ title: 遥测
- 运行的 Rancher 的镜像名称和版本。
- 此安装的唯一随机标识符。
-### 我可以看到发送的信息吗?
+## 我可以看到发送的信息吗?
如果启用了遥测,你可以转到 `https:///v1-telemetry` 查看当前数据。
如果未启用遥测,则收集数据的进程未运行,因此没有可供查看的内容。
-### 如何打开或关闭它?
+## 如何打开或关闭它?
完成初始设置后,管理员可以转到 UI `全局`中的`设置`页面,单击**编辑**,然后将 `telemetry-opt` 更改为 `in` 或 `out`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
index 8e5c0eca63f..4f6461e85c6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
@@ -8,7 +8,7 @@ title: 在离线环境中升级
:::
-### Rancher Helm 模板选项
+## Rancher Helm 模板选项
使用安装 Rancher 时选择的选项来渲染 Rancher 模板。参考下表来替换每个占位符。Rancher 需要配置为使用私有镜像仓库,以便配置所有 Rancher 启动的 Kubernetes 集群或 Rancher 工具。
@@ -21,7 +21,6 @@ title: 在离线环境中升级
| `` | 你的私有镜像仓库的 DNS 名称。 |
| `` | 在 K8s 集群上运行的 cert-manager 版本。 |
-
### 选项 A:使用默认的自签名证书
```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
index 437bc35815e..d2f716b4038 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
@@ -73,7 +73,7 @@ Rancher 是使用 Kubernetes 的 [Helm](https://helm.sh/) 包管理器安装的
### 1. 添加 Helm Chart 仓库
-执行 `helm repo add` 命令,以添加包含安装 Rancher 的 Chart 的 Helm Chart 仓库。有关如何选择仓库,以及哪个仓库最适合你的用例,请参见[选择 Rancher 版本](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md)。
+执行 `helm repo add` 命令,以添加包含安装 Rancher 的 Chart 的 Helm Chart 仓库。有关如何选择仓库,以及哪个仓库最适合你的用例,请参见[选择 Rancher 版本](../resources/choose-a-rancher-version.md)。
- Latest:建议用于试用最新功能
```
@@ -103,7 +103,7 @@ Rancher Management Server 默认需要 SSL/TLS 配置来保证访问的安全性
:::note
-如果你想在外部终止 SSL/TLS,请参见[外部负载均衡器的 TLS 终止](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)。
+如果你想在外部终止 SSL/TLS,请参见[外部负载均衡器的 TLS 终止](../installation-references/helm-chart-options.md#外部-tls-终止)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
index 04c2d4ff181..2cb440f868e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
@@ -180,7 +180,7 @@ ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:3187
## 10. 安装 Rancher Helm Chart
-按照[本页](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
+按照[本页](./install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
安装 Rancher 时,使用上一步获取的 DNS 名称作为 Rancher Server 的 URL。它可以作为 Helm 选项传递进来。例如,如果 DNS 名称是 `rancher.my.org`,你需要使用 `--set hostname=rancher.my.org` 选项来运行 Helm 安装命令。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
index c306729f752..01674c0dfd1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
@@ -49,7 +49,7 @@ Rancher v2.6.4 将 cluster-api 模块从 v0.4.4 升级到 v1.0.2。反过来,c
1. 在左侧导航栏中,点击 **Rancher 备份 > 还原**。
:::note
- 如果 Rancher Backups 应用不可见,你需要到 **Apps** 的 Charts 页面中安装应用。详情请参见[此处](../../../pages-for-subheaders/helm-charts-in-rancher.md#charts)。
+ 如果 Rancher Backups 应用不可见,你需要到 **Apps** 的 Charts 页面中安装应用。详情请参见[此处](../../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md#访问-charts)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
index 2912d831f55..b303fc0c1c9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
@@ -4,7 +4,7 @@ title: Rancher Server Kubernetes 集群的问题排查
本文介绍如何对安装在 Kubernetes 集群上的 Rancher 进行故障排除。
-### 相关命名空间
+## 相关命名空间
故障排除主要针对以下 3 个命名空间中的对象:
@@ -12,7 +12,7 @@ title: Rancher Server Kubernetes 集群的问题排查
- `ingress-nginx`:Ingress Controller Pod 和 services。
- `cert-manager`:`cert-manager` Pod。
-### "default backend - 404"
+## "default backend - 404"
很多操作都有可能导致 Ingress Controller 无法将流量转发到你的 Rancher 实例。但是大多数情况下都是由错误的 SSL 配置导致的。
@@ -21,7 +21,7 @@ title: Rancher Server Kubernetes 集群的问题排查
- [Rancher 是否正在运行](#检查-rancher-是否正在运行)
- [证书的 Common Name(CN)是 "Kubernetes Ingress Controller Fake Certificate"](#证书的-cn-是-kubernetes-ingress-controller-fake-certificate)
-### 检查 Rancher 是否正在运行
+## 检查 Rancher 是否正在运行
使用 `kubectl` 检查 `cattle-system` 系统命名空间,并查看 Rancher Pod 的状态是否是 **Running**:
@@ -49,7 +49,7 @@ Events:
Normal Started 11m kubelet, localhost Started container
```
-### 检查 Rancher 日志
+## 检查 Rancher 日志
使用 `kubectl` 列出 Pod:
@@ -66,7 +66,7 @@ pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m
kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh
```
-### 证书的 CN 是 "Kubernetes Ingress Controller Fake Certificate"
+## 证书的 CN 是 "Kubernetes Ingress Controller Fake Certificate"
使用浏览器检查证书的详细信息。如果显示 CN 是 "Kubernetes Ingress Controller Fake Certificate",则说明读取或颁发 SSL 证书时出现了问题。
@@ -76,7 +76,7 @@ kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh
:::
-### 排查 Cert-Manager 颁发的证书(Rancher 或 Let's Encrypt 生成的)问题
+## 排查 Cert-Manager 颁发的证书(Rancher 或 Let's Encrypt 生成的)问题
`cert-manager` 有 3 部分:
@@ -107,7 +107,7 @@ Events:
Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found
```
-### 排查你自己提供的 SSL 证书问题
+## 排查你自己提供的 SSL 证书问题
你的证书直接应用于 `cattle-system` 命名空间中的 Ingress 对象。
@@ -127,7 +127,7 @@ kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-co
W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found
```
-### 没有匹配的 "Issuer"
+## 没有匹配的 "Issuer"
你所选的 SSL 配置要求在安装 Rancher 之前先安装 Cert-Manager,否则会出现以下错误:
@@ -138,18 +138,18 @@ Error: validation failed: unable to recognize "": no matches for kind "Issuer" i
在这种情况下,先安装 Cert-Manager,然后再重新安装 Rancher。
-### Canal Pod 显示 READY 2/3
+## Canal Pod 显示 READY 2/3
此问题的最常见原因是端口 8472/UDP 在节点之间未打开。因此,你可以检查你的本地防火墙、网络路由或安全组。
解决网络问题后,`canal` Pod 会超时并重启以建立连接。
-### nginx-ingress-controller Pod 显示 RESTARTS
+## nginx-ingress-controller Pod 显示 RESTARTS
此问题的最常见原因是 `canal` pod 未能建立覆盖网络。参见 [canal Pod 显示 READY `2/3`](#canal-pod-显示-ready-23) 进行排查。
-### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed)
+## Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed)
此错误的原因可能是:
@@ -171,18 +171,18 @@ $ nc xxx.xxx.xxx.xxx 22
SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10
```
-### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found
+## Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found
`ssh_key_path` 密钥文件无法访问:请确保你已经指定了私钥文件(不是公钥 `.pub`),而且运行 `rke` 命令的用户可以访问该私钥文件。
-### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain
+## Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain
`ssh_key_path` 密钥文件不是访问节点的正确文件:请仔细检查,确保你已为节点指定了正确的 `ssh_key_path` 和连接用户。
-### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys
+## Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys
如需使用加密的私钥,请使用 `ssh-agent` 来使用密码来加载密钥。如果在运行 `rke` 命令的环境中找到 `SSH_AUTH_SOCK` 环境变量,它将自动用于连接到节点。
-### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
+## Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
节点无法通过配置的 `address` 和 `port` 访问。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-references/feature-flags.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-references/feature-flags.md
index 5a022b13135..e14ec8e7d9e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-references/feature-flags.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-references/feature-flags.md
@@ -15,14 +15,14 @@ title: 功能开关
以下是 Rancher 中可用的功能开关列表。如果你是从旧 Rancher 版本升级的,你可能会在 Rancher UI 中看到其他功能,例如 `proxy` 或 `dashboard`(均[已中断](/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md)):
- `continuous-delivery`:允许从 Fleet 中单独禁用 Fleet GitOps。有关详细信息,请参阅[持续交付](../../../how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md)。
-- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md)。
+- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../integrations-in-rancher/fleet/fleet.md)。
- `harvester`:管理 Virtualization Management 页面的访问。用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。有关详细信息,请参阅 [Harvester 集成](../../../integrations-in-rancher/harvester/overview.md)。
- `istio-virtual-service-ui`:启用[可视界面](../../../how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md)来创建、读取、更新和删除 Istio 虚拟服务和目标规则,这些都是 Istio 流量管理功能。
- `legacy`:启用 2.5.x 及更早版本的一组功能,这些功能正逐渐被新的实现淘汰。它们是已弃用以及后续可用于新版本的功能组合。新的 Rancher 安装会默认禁用此标志。如果你从以前版本的 Rancher 升级,此标志会启用。
- `multi-cluster-management`:允许配置和管理多个 Kubernetes 集群。此标志只能在安装时设置。后续无法启用或禁用它。
- `rke1-custom-node-cleanup`:清除已删除的 RKE1 自定义节点。建议你启用此标志,以防止已删除的节点尝试重新加入集群。
- `rke2`:启用配置 RKE2 集群。此标志默认启用。
-- `token-hashing`:启用令牌哈希。启用后,会使用 SHA256 算法对现有 Token 和所有新 Token 进行哈希处理。一旦对 Token 进行哈希处理,就无法撤消操作。此标志在启用后无法禁用。有关详细信息,请参阅 [API 令牌](../../../reference-guides/about-the-api/api-tokens.md#令牌哈希)。
+- `token-hashing`:启用令牌哈希。启用后,会使用 SHA256 算法对现有 Token 和所有新 Token 进行哈希处理。一旦对 Token 进行哈希处理,就无法撤消操作。此标志在启用后无法禁用。有关详细信息,请参阅 [API 令牌](../../../api/api-tokens.md#令牌哈希)。
- `unsupported-storage-drivers`:允许启用非默认启用的存储提供程序和卷插件。有关详细信息,请参阅[允许使用不受支持的存储驱动程序](../../../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)。
下表介绍了 Rancher 中功能开关的可用性和默认值。标记为“GA”的功能已普遍可用:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
index 84f2383eaf7..8a651472811 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
@@ -18,7 +18,7 @@ enable_cri_dockerd: true
如果你想使用其他容器运行时,Rancher 也提供使用 Containerd 作为默认运行时的,以边缘为中心的 K3s,和以数据中心为中心的 RKE2 Kubernetes 发行版。然后,你就可以通过 Rancher 对导入的 RKE2 和 K3s Kubernetes 集群进行升级和管理。
-### 常见问题
+## 常见问题
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
index 4e9ec43a2a1..8bc8a72bf35 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
@@ -71,7 +71,7 @@ Rancher 的硬件占用空间取决于许多因素,包括:
- 工作负载数量 (例如: Kubernetes 部署,Fleet 部署)。
- 使用模式 (例如:主动使用的功能集合,使用频率,并发用户数量).
-由于存在许多可能随时间变化的影响因素,因此此处列出的要求为适合大多数用例的起点。 然而,你的用例可能有不同的要求。 若你需要对于特定场景的咨询,请[联系 Rancher]((https://rancher.com/contact/)) 以获得进一步指导。
+由于存在许多可能随时间变化的影响因素,因此此处列出的要求为适合大多数用例的起点。 然而,你的用例可能有不同的要求。 若你需要对于特定场景的咨询,请[联系 Rancher](https://rancher.com/contact/) 以获得进一步指导。
特别指出,本页面中的要求基于以下假设的环境提出,包括:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
index a639349d8cf..23612291fc0 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
@@ -24,7 +24,7 @@ Docker 安装适用于想要测试 Rancher 的用户。
选择以下的选项之一:
-### 选项 A:使用 Rancher 默认的自签名证书
+## 选项 A:使用 Rancher 默认的自签名证书
单击展开
@@ -51,7 +51,7 @@ docker run -d --restart=unless-stopped \
-### 选项 B:使用你自己的证书 - 自签名
+## 选项 B:使用你自己的证书 - 自签名
单击展开
@@ -94,7 +94,7 @@ docker run -d --restart=unless-stopped \
-### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
+## 选项 C:使用你自己的证书 - 可信 CA 签名的证书
单击展开
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
index 8978a639b0e..e70759c3f3a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
@@ -21,7 +21,7 @@ Rancher 可以安装在任何 Kubernetes 集群上。为了阅读方便,我们
- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
- **私有镜像仓库**,用于将容器镜像分发到你的主机。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
这些主机会断开互联网链接,但需要能与你的私有镜像仓库连接。
@@ -29,7 +29,7 @@ Rancher 可以安装在任何 Kubernetes 集群上。为了阅读方便,我们
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置外部数据库
+## 2. 配置外部数据库
K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的数据库来运行 Kubernetes。该功能让 Kubernetes 运维更加灵活。你可以根据实际情况选择合适的数据库。
@@ -45,7 +45,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
如需获取配置 K3s 集群数据库的所有可用选项,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
-### 3. 配置负载均衡器
+## 3. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -68,7 +68,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
:::
-### 4. 配置 DNS 记录
+## 4. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
@@ -78,7 +78,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
-### 5. 配置私有镜像仓库
+## 5. 配置私有镜像仓库
Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
@@ -102,21 +102,21 @@ Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的
这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
-### 为什么使用三个节点?
+## 为什么使用三个节点?
在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
这些主机会断开互联网链接,但需要能与你的私有镜像仓库连接。
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -139,7 +139,7 @@ Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的
:::
-### 3. 配置 DNS 记录
+## 3. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
@@ -149,7 +149,7 @@ Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的
有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
-### 4. 配置私有镜像仓库
+## 4. 配置私有镜像仓库
Rancher 支持使用安全的私有镜像仓库进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
@@ -172,15 +172,15 @@ Rancher 支持使用安全的私有镜像仓库进行离线安装。你必须有
:::
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
此主机会断开互联网链接,但需要能与你的私有镜像仓库连接。
-请确保你的节点满足[操作系统,容器,硬件和网络](../../../../pages-for-subheaders/installation-requirements.md)的常规安装要求。
+请确保你的节点满足[操作系统,容器,硬件和网络](../../installation-requirements/installation-requirements.md)的常规安装要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置私有 Docker 镜像仓库
+## 2. 配置私有 Docker 镜像仓库
Rancher 支持使用私有镜像仓库在堡垒服务器中进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
@@ -189,5 +189,5 @@ Rancher 支持使用私有镜像仓库在堡垒服务器中进行离线安装。
-### 后续操作
+## 后续操作
[收集镜像并发布到你的私有镜像仓库](publish-images.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
index d28b59ed745..bcf54ebf66e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
@@ -4,7 +4,7 @@ title: 4. 安装 Rancher
本文介绍如何在高可用 Kubernetes 安装的离线环境部署 Rancher。离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-### Rancher 特权访问
+## Rancher 特权访问
当 Rancher Server 部署在 Docker 容器中时,容器内会安装一个本地 Kubernetes 集群供 Rancher 使用。为 Rancher 的很多功能都是以 deployment 的方式运行的,而在容器内运行容器是需要特权模式的,因此你需要在安装 Rancher 时添加 `--privileged` 选项。
@@ -116,7 +116,7 @@ curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/re
将获取的 Chart 复制到有权访问 Rancher Server 集群的系统以完成安装。
-##### 1. 安装 Cert-Manager
+#### 1. 安装 Cert-Manager
使用要用于安装 Chart 的选项来安装 cert-manager。记住要设置 `image.repository` 选项,以从你的私有镜像仓库拉取镜像。此操作会创建一个包含 Kubernetes manifest 文件的 `cert-manager` 目录。
@@ -156,7 +156,8 @@ curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/re
-##### 2. 安装 Rancher
+### 2. 安装 Rancher
+
首先,参见[添加 TLS 密文](../../resources/add-tls-secrets.md)发布证书文件,以便 Rancher 和 Ingress Controller 可以使用它们。
然后,使用 kubectl 为 Rancher 创建命名空间:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
index 24d8ac7d47c..f16dfc23d57 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
@@ -2,13 +2,13 @@
title: 其他安装方式
---
-### 离线安装
+## 离线安装
按照[以下步骤](air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-### Docker 安装
+## Docker 安装
[单节点 Docker 安装](rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
index 7ac7caca50c..8ca4c273e92 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
@@ -4,7 +4,7 @@ title: 3. 安装 Rancher
在前文的操作后,你已经有了一个运行的 RKE 集群,现在可以在其中安装 Rancher 了。出于安全考虑,所有到 Rancher 的流量都必须使用 TLS 加密。在本教程中,你将使用 [cert-manager](https://cert-manager.io/)自动颁发自签名证书。在实际使用情况下,你可使用 Let's Encrypt 或自己的证书。
-### 安装 Helm CLI
+## 安装 Helm CLI
@@ -16,7 +16,7 @@ chmod +x get_helm.sh
sudo ./get_helm.sh
```
-### 安装 cert-manager
+## 安装 cert-manager
添加 cert-manager Helm 仓库:
@@ -59,7 +59,7 @@ kubectl rollout status deployment -n cert-manager cert-manager
kubectl rollout status deployment -n cert-manager cert-manager-webhook
```
-### 安装 Rancher
+## 安装 Rancher
接下来,你可以安装 Rancher 了。首先,添加 Helm 仓库:
@@ -97,7 +97,7 @@ kubectl rollout status deployment -n cattle-system rancher
:::
-### 其他资源
+## 其他资源
以下资源可能对安装 Rancher 有帮助:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
index f60e31b2c3e..88f410f4314 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
@@ -12,13 +12,13 @@ title: '1. 配置基础设施'
这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
-### 为什么使用三个节点?
+## 为什么使用三个节点?
在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
这些主机将通过 HTTP 代理连接到互联网。
@@ -26,7 +26,7 @@ title: '1. 配置基础设施'
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -49,7 +49,7 @@ title: '1. 配置基础设施'
:::
-### 3. 配置 DNS 记录
+## 3. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
@@ -60,5 +60,5 @@ title: '1. 配置基础设施'
有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
-### 后续操作
+## 后续操作
[配置 Kubernetes 集群](install-kubernetes.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
index 34ee707431b..992fd0b55e2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
@@ -4,7 +4,7 @@ title: 证书故障排除
-### 如何确定我的证书格式是否为 PEM?
+## 如何确定我的证书格式是否为 PEM?
你可以通过以下特征识别 PEM 格式:
@@ -48,7 +48,7 @@ VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
-----END PRIVATE KEY-----
```
-### 将 PKCS8 证书密钥转换为 PKCS1
+## 将 PKCS8 证书密钥转换为 PKCS1
如果你使用的是 PKCS8 证书密钥文件,Rancher 将打印以下日志:
@@ -64,7 +64,7 @@ openssl rsa -in key.pem -out convertedkey.pem
你可使用 `convertedkey.pem` 作为 Rancher 证书密钥文件。
-### 添加中间证书的顺序是什么?
+## 添加中间证书的顺序是什么?
添加证书的顺序如下:
@@ -77,7 +77,7 @@ openssl rsa -in key.pem -out convertedkey.pem
-----END CERTIFICATE-----
```
-### 如何验证我的证书链?
+## 如何验证我的证书链?
你可使用 `openssl` 二进制文件来验证证书链。如果命令的输出以 `Verify return code: 0 (ok)` 结尾(参见以下示例),你的证书链是有效的。`ca.pem` 文件必须与你添加到 `rancher/rancher` 容器中的文件一致。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
index c2a63b86a5e..ae91e251294 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
@@ -73,7 +73,7 @@ docker run -d --restart=unless-stopped \
使用 [OpenSSL](https://www.openssl.org/) 或其他方法创建自签名证书。
- 证书文件的格式必须是 PEM。
-- 在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+- 在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](./certificate-troubleshooting.md)。
:::
@@ -107,7 +107,7 @@ docker run -d --restart=unless-stopped \
:::note 先决条件:
- 证书文件的格式必须是 PEM。
-- 在你的证书文件中,包括可信 CA 提供的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+- 在你的证书文件中,包括可信 CA 提供的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](./certificate-troubleshooting.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
index 7dbef6d2cab..06a8d7adfa9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
@@ -147,7 +147,7 @@ docker run -d --volumes-from rancher-data \
rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
@@ -183,7 +183,7 @@ docker run -d --volumes-from rancher-data \
rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
@@ -218,7 +218,7 @@ docker run -d --volumes-from rancher-data \
--no-cacerts
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
#### 选项 D:Let's Encrypt 证书
@@ -255,7 +255,7 @@ docker run -d --volumes-from rancher-data \
--acme-domain
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
@@ -288,7 +288,7 @@ docker run -d --volumes-from rancher-data \
/rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
#### 选项 B:使用你自己的证书 - 自签名
@@ -324,7 +324,7 @@ docker run -d --restart=unless-stopped \
--privileged \
/rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
#### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
@@ -366,7 +366,7 @@ docker run -d --volumes-from rancher-data \
--privileged
/rancher/rancher:
```
-特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+特权访问是[必须](./rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/bootstrap-password.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/bootstrap-password.md
index 249b14115a0..6819429dbe5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/bootstrap-password.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/bootstrap-password.md
@@ -6,7 +6,7 @@ Rancher 首次启动时,会为第一个管理员用户随机生成一个密码
如果你在安装过程中没有使用变量来设置引导密码,则会随机生成引导密码。如需了解使用变量设置引导密码的详情,请参见下文。
-### 在 Helm 安装中指定引导密码
+## 在 Helm 安装中指定引导密码
Helm 安装的情况下,你可以使用 `.Values.bootstrapPassword` 在 Helm Chart 值中指定引导密码变量。
@@ -16,7 +16,7 @@ Helm 安装的情况下,你可以使用 `.Values.bootstrapPassword` 在 Helm C
kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}'
```
-### 在 Docker 安装中指定引导密码
+## 在 Docker 安装中指定引导密码
如果 Rancher 是使用 Docker 安装的,你可以通过在 Docker 安装命令中传递 `-e CATTLE_BOOTSTRAP_PASSWORD=password` 来指定引导密码。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
index 7ddfcef2d8e..734ef56e8a5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
@@ -95,7 +95,7 @@ Rancher Helm Chart 版本与 Rancher 版本(即 `appVersion`)对应。添加
-在执行 [Docker 安装](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md)、升级或回滚时,你可以使用 _tags_ 来安装特定版本的 Rancher。
+在执行 [Docker 安装](../other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)、升级或回滚时,你可以使用 _tags_ 来安装特定版本的 Rancher。
### Server 标签
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/resources.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/resources.md
index 2862aba2cd2..ce4989d9d0e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/resources.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/resources.md
@@ -2,19 +2,19 @@
title: 资源
---
-### Docker 安装
+## Docker 安装
[单节点 Docker 安装](../other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
-### 离线安装
+## 离线安装
按照[以下步骤](../other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-### 高级选项
+## 高级选项
安装 Rancher 时,有如下几个可开启的高级选项:每个安装指南中都提供了对应的选项。了解选项详情:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
index 4678487523e..c5eb98f8aaf 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
@@ -176,7 +176,7 @@ kubectl edit -n cattle-system deployment/cattle-cluster-agent
### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
-在 Rancher UI 的[持续交付](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet/overview.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
#### 为什么要执行这一步骤?
@@ -256,7 +256,7 @@ helm ls -n cattle-system
### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
-在 Rancher UI 的[持续交付](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet/overview.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
#### 为什么要执行这一步骤?
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md
index 3d825627ef3..f8a6cb3d1a0 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md
@@ -264,7 +264,7 @@ cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m
---
-Rancher 现在支持 cert-manager 1.6.2 和 1.7.1。推荐使用 v1.7.x,因为 v 1.6.x 将在 2022 年 3 月 30 日结束生命周期。详情请参见 [cert-manager 文档](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。有关将 cert-manager 从 1.5 升级到 1.6 的说明,请参见上游的 [cert-manager 文档](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/)。有关将 cert-manager 从 1.6 升级到 1.7 的说明,请参见上游的 [cert-manager 文档](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/)。
+Rancher 现在支持 cert-manager 1.6.2 和 1.7.1。推荐使用 v1.7.x,因为 v 1.6.x 将在 2022 年 3 月 30 日结束生命周期。详情请参见 [cert-manager 文档](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。有关将 cert-manager 从 1.5 升级到 1.6 的说明,请参见上游的 [cert-manager 文档](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/)。有关将 cert-manager 从 1.6 升级到 1.7 的说明,请参见上游的 [cert-manager 文档](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/)。
---
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
index 3d01c38aed8..7ea8aaaf817 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
@@ -32,7 +32,7 @@ Rancher 的 Kubernetes 元数据包含 Rancher 用于配置 [RKE 集群](../../h
- 更改 Rancher 用于同步元数据的 URL。适用于要让 Rancher 从本地同步而不是与 GitHub 同步的情况。这在离线环境下非常有用。
- 防止 Rancher 自动同步元数据。这可以防止在 Rancher 中使用新的/不受支持的 Kubernetes 版本。
-### 刷新 Kubernetes 元数据
+## 刷新 Kubernetes 元数据
默认情况下,管理员或具有**管理集群驱动**[全局角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)的用户,可以刷新 Kubernetes 元数据。
@@ -44,7 +44,7 @@ Rancher 的 Kubernetes 元数据包含 Rancher 用于配置 [RKE 集群](../../h
你可以将 `refresh-interval-minutes` 设置为 `0`(见下文),将 Rancher 配置为仅在需要时刷新元数据,并在需要时使用此按钮手动执行元数据刷新。
-### 配置元数据同步
+## 配置元数据同步
:::caution
@@ -70,7 +70,7 @@ RKE 元数据的配置控制 Rancher 同步元数据的频率以及从何处下
但是,如果你有[离线设置](#离线设置)需求,你需要将 Kubernetes 元数据仓库镜像到 Rancher 可用的位置。然后,你需要更改 URL 来指向 JSON 文件的新位置。
-### 离线设置
+## 离线设置
Rancher Server 会定期刷新 `rke-metadata-config` 来下载新的 Kubernetes 版本元数据。有关 Kubernetes 和 Rancher 版本的兼容性表,请参阅[服务条款](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/nodeports.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/nodeports.md
index bf2294d4c06..1023f0da487 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/nodeports.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/nodeports.md
@@ -2,11 +2,11 @@
title: 部署带有 NodePort 的工作负载
---
-### 先决条件
+## 先决条件
你已有一个正在运行的集群,且该集群中有至少一个节点。
-### 1. 部署工作负载
+## 1. 部署工作负载
你可以开始创建你的第一个 Kubernetes [工作负载](https://kubernetes.io/docs/concepts/workloads/)。工作负载是一个对象,其中包含 pod 以及部署应用所需的其他文件和信息。
@@ -36,11 +36,11 @@ title: 部署带有 NodePort 的工作负载
-### 2. 查看应用
+## 2. 查看应用
在**工作负载**页面中,点击工作负载下方的链接。如果 deployment 已完成,你的应用会打开。
-### 注意事项
+## 注意事项
如果使用云虚拟机,你可能无法访问运行容器的端口。这种情况下,你可以使用 `Execute Shell` 在本地主机的 SSH 会话中测试 Nginx。如果可用的话,使用工作负载下方的链接中 `:` 后面的端口号。在本例中,端口号为 `31568`。
@@ -125,11 +125,11 @@ gettingstarted@rancher:~$
```
-### 已完成!
+## 已完成!
恭喜!你已成功通过 NodePort 部署工作负载。
-#### 后续操作
+### 后续操作
使用完沙盒后,你需要清理 Rancher Server 和集群。详情请参见:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
index 86f8017f09e..3beeb02b815 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
@@ -2,11 +2,11 @@
title: 部署带有 Ingress 的工作负载
---
-### 先决条件
+## 先决条件
你已有一个正在运行的集群,且该集群中有至少一个节点。
-### 1. 部署工作负载
+## 1. 部署工作负载
你可以开始创建你的第一个 Kubernetes [工作负载](https://kubernetes.io/docs/concepts/workloads/)。工作负载是一个对象,其中包含 pod 以及部署应用所需的其他文件和信息。
@@ -19,7 +19,7 @@ title: 部署带有 Ingress 的工作负载
1. 点击 **Deployment**。
1. 为工作负载设置**名称**。
1. 在**容器镜像**字段中,输入 `rancher/hello-world`。注意区分大小写。
-1. 在 `Service Type` 点击 **Add Port** 和 `Cluster IP`,并在 **Private Container Port** 字段中输入`80`。你可以将 `Name` 留空或指定名称。通过添加端口,你可以访问集群内外的应用。有关详细信息,请参阅 [Service](../../../pages-for-subheaders/workloads-and-pods.md#services)。
+1. 在 `Service Type` 点击 **Add Port** 和 `Cluster IP`,并在 **Private Container Port** 字段中输入`80`。你可以将 `Name` 留空或指定名称。通过添加端口,你可以访问集群内外的应用。有关详细信息,请参阅 [Service](../../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md#services)。
1. 单击**创建**。
**结果**:
@@ -27,7 +27,7 @@ title: 部署带有 Ingress 的工作负载
* 工作负载已部署。此过程可能需要几分钟。
* 当工作负载完成部署后,它的状态会变为 **Active**。你可以从项目的**工作负载**页面查看其状态。
-### 2. 通过 Ingress 暴露应用
+## 2. 通过 Ingress 暴露应用
现在应用已启动并运行,你需要暴露应用以让其他服务连接到它。
@@ -53,17 +53,17 @@ title: 部署带有 Ingress 的工作负载
**结果**:应用分配到了一个 `sslip.io` 地址并暴露。这可能需要一两分钟。
-### 查看应用
+## 查看应用
在 **Deployments** 页面中,找到你 deployment 的 **endpoint** 列,然后单击一个 endpoint。可用的 endpoint 取决于你添加到 deployment 中的端口配置。如果你看不到随机分配端口的 endpoint,请将你在创建 Ingress 时指定的路径尾附到 IP 地址上。例如,如果你的 endpoint 是 `xxx.xxx.xxx.xxx` 或 `https://xxx.xxx.xxx.xxx`,把它修改为 `xxx.xxx.xxx.xxx/hello` 或 `https://xxx.xxx.xxx.xxx/hello`。
应用将在另一个窗口中打开。
-#### 已完成!
+### 已完成!
恭喜!你已成功通过 Ingress 部署工作负载。
-#### 后续操作
+### 后续操作
使用完沙盒后,你需要清理 Rancher Server 和集群。详情请参见:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
index b10f0b61f1b..f038294116c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
@@ -10,14 +10,14 @@ title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
## 操作系统,Docker,硬件和网络要求
-请确保你的节点满足常规的[安装要求](../../pages-for-subheaders/installation-requirements.md)。
+请确保你的节点满足常规的[安装要求](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。
## 安装概要
## 1. 配置 Linux 主机
-根据我们的[要求](../../pages-for-subheaders/installation-requirements.md)配置一个 Linux 主机来启动 Rancher Server。
+根据我们的[要求](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)配置一个 Linux 主机来启动 Rancher Server。
## 2. 选择一个 SSL 选项并安装 Rancher
@@ -76,11 +76,11 @@ title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
1. 输入以下命令:
- ```
- docker run -d --restart=unless-stopped \
- -p 80:80 -p 443:443 \
- rancher/rancher:latest --no-cacerts
- ```
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ rancher/rancher:latest --no-cacerts
+ ```
@@ -166,7 +166,7 @@ http {
## 后续操作
- **推荐**:检查单节点[备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)和[恢复](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)。你可能暂时没有需要备份的数据,但是我们建议你在常规使用 Rancher 后创建备份。
-- 创建 Kubernetes 集群:[配置 Kubernetes 集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)。
+- 创建 Kubernetes 集群:[配置 Kubernetes 集群](../new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)。
@@ -188,7 +188,7 @@ http {
### 离线环境
-如果你访问此页面是为了完成[离线安装](../../pages-for-subheaders/air-gapped-helm-cli-install.md),则在运行安装命令时,先将你的私有镜像仓库 URL 附加到 Server 标志中。也就是说,在 `rancher/rancher:latest` 前面添加 `` 和私有镜像仓库 URL。
+如果你访问此页面是为了完成[离线安装](../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md),则在运行安装命令时,先将你的私有镜像仓库 URL 附加到 Server 标志中。也就是说,在 `rancher/rancher:latest` 前面添加 `` 和私有镜像仓库 URL。
**示例**:
@@ -208,7 +208,7 @@ docker run -d --restart=unless-stopped \
rancher/rancher:latest
```
-此操作需要 [privileged 访问](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)。
+此操作需要 [privileged 访问](../../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md#rancher-特权访问)。
这个 7 层 NGINX 配置已经在 NGINX 1.13(Mainline)和 1.14(Stable)版本上进行了测试。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-api-audit-log.md
index 94974cb52f7..285c70d5617 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-api-audit-log.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-api-audit-log.md
@@ -20,7 +20,7 @@ API 审计可以在 Rancher 安装或升级期间启用。
| 参数 | 描述 |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `AUDIT_LEVEL` | `0` - 禁用审计日志(默认) `1` - 日志事件元数据 `2` - 日志事件元数据和请求体 `3` - 日志事件元数据,请求体和响应体。请求/响应对的每个日志事务都使用同一个的 `auditID`。 如需了解每个设置记录的日志内容,请参见[审计日志级别](#审核日志级别)。 |
+| `AUDIT_LEVEL` | `0` - 禁用审计日志(默认) `1` - 日志事件元数据 `2` - 日志事件元数据和请求体 `3` - 日志事件元数据,请求体和响应体。请求/响应对的每个日志事务都使用同一个的 `auditID`。 如需了解每个设置记录的日志内容,请参见[审计日志级别](#审核日志级别)。 |
| `AUDIT_LOG_PATH` | Rancher Server API 的日志路径。默认路径:`/var/log/auditlog/rancher-api-audit.log`。你可以将日志目录挂载到主机。 示例:`AUDIT_LOG_PATH=/my/custom/path/` |
| `AUDIT_LOG_MAXAGE` | 旧审计日志文件可保留的最大天数。默认为 10 天。 |
| `AUDIT_LOG_MAXBACKUP` | 保留的审计日志最大文件个数。默认值为 10。 |
@@ -30,7 +30,7 @@ API 审计可以在 Rancher 安装或升级期间启用。
### 审核日志级别
-下表介绍了每个 [`AUDIT_LEVEL`](#audit-level) 记录的 API 事务:
+下表介绍了每个 [`AUDIT_LEVEL`](#api-审计日志选项) 记录的 API 事务:
| `AUDIT_LEVEL` 设置 | 请求元数据 | 请求体 | 响应元数据 | 响应体 |
| --------------------- | ---------------- | ------------ | ----------------- | ------------- |
@@ -59,7 +59,7 @@ kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log
#### 发送审计日志
-你可以为集群启用 Rancher 的内置日志收集和传送功能,将审计日志和其他服务日志发送到支持的 endpoint。详情请参见 [Rancher 工具 - Logging](../../pages-for-subheaders/logging.md)。
+你可以为集群启用 Rancher 的内置日志收集和传送功能,将审计日志和其他服务日志发送到支持的 endpoint。详情请参见 [Rancher 工具 - Logging](../../integrations-in-rancher/logging/logging.md)。
## 审计日志示例
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
index c1f42f70a4b..fdc7139d5ef 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
@@ -2,9 +2,9 @@
title: 持续交付
---
-Rancher 中预装的 [Fleet](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
+Rancher 中预装的 [Fleet](../../../integrations-in-rancher/fleet/fleet.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
-如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
+如需启用或禁用此功能,请参见[启用实验功能主页](./enable-experimental-features.md)中的说明。
| 环境变量键 | 默认值 | 描述 |
---|---|---
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
index 35801de23b4..8e730c24cb3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
@@ -4,9 +4,9 @@ title: UI 管理 Istio 虚拟服务和目标规则
此功能可启动一个 UI,用于管理 Istio 的流量,其中包括创建、读取、更新和删除虚拟服务(Virtual Service)和目标规则(Destination Rule)。
-> **注意**:启用此功能并不会启用 Istio。集群管理员需要[为集群启用 Istio](../../../pages-for-subheaders/istio-setup-guide.md) 才能使用该功能。
+> **注意**:启用此功能并不会启用 Istio。集群管理员需要[为集群启用 Istio](../istio-setup-guide/istio-setup-guide.md) 才能使用该功能。
-如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
+如需启用或禁用此功能,请参见[启用实验功能主页](./enable-experimental-features.md)中的说明。
| 环境变量键 | 默认值 | 状态 | 可用于 |
---|---|---|---
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
index 16ebb15e25a..2981f8e62a9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
@@ -11,7 +11,7 @@ title: "在 ARM64 上运行 Rancher(实验性)"
如果你的节点使用 ARM64 架构,你可以使用以下选项:
- 在 ARM64 架构的节点上运行 Rancher
- - 此选项仅适用于 Docker 安装。请知悉,以下安装命令取代了 [Docker 安装链接](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md)中的示例:
+ - 此选项仅适用于 Docker 安装。请知悉,以下安装命令取代了 [Docker 安装链接](../../../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)中的示例:
```
# 在最后一行 `rancher/rancher:vX.Y.Z` 中,请务必将 "X.Y.Z" 替换为包含 ARM64 版本的发布版本。例如,如果你的匹配版本是 v2.5.8,请在此行填写 `rancher/rancher:v2.5.8`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
index 19b9b27b2e0..19689914b9b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
@@ -4,17 +4,18 @@ title: 使用非默认支持的存储驱动
此功能允许你使用不是默认启用的存储提供商和卷插件。
-如需启用或禁用此功能,请参见[启用实验功能主页](../../../pages-for-subheaders/enable-experimental-features.md)中的说明。
+如需启用或禁用此功能,请参见[启用实验功能主页](./enable-experimental-features.md)中的说明。
| 环境变量键 | 默认值 | 描述 |
----|---|---
+|---|---|---|
| `unsupported-storage-drivers` | `false` | 启用非默认启用的存储提供商和卷插件。 |
-### 默认启用的持久卷插件
+## 默认启用的持久卷插件
+
下表描述了默认启用的存储类型对应的持久卷插件。启用此功能开关时,不在此列表中的任何持久卷插件均被视为实验功能,且不受支持:
| 名称 | 插件 |
---------|----------
+|--------|----------|
| Amazon EBS Disk | `aws-ebs` |
| AzureFile | `azure-file` |
| AzureDisk | `azure-disk` |
@@ -25,15 +26,16 @@ title: 使用非默认支持的存储驱动
| 网络文件系统 | `nfs` |
| hostPath | `host-path` |
-### 默认启用的 StorageClass
+## 默认启用的 StorageClass
+
下表描述了默认启用的 StorageClass 对应的持久卷插件。启用此功能开关时,不在此列表中的任何持久卷插件均被视为实验功能,且不受支持:
| 名称 | 插件 |
---------|--------
+|--------|--------|
| Amazon EBS Disk | `aws-ebs` |
| AzureFile | `azure-file` |
| AzureDisk | `azure-disk` |
| Google Persistent Disk | `gce-pd` |
| Longhorn | `flex-volume-longhorn` |
| VMware vSphere Volume | `vsphere-volume` |
-| 本地 | `local` |
\ No newline at end of file
+| 本地 | `local` |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
index 39c9bdee036..2b7ec8c46b6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
@@ -19,11 +19,11 @@ title: 1. 在集群中启用 Istio
1. 如果你还没有安装 Monitoring 应用,系统会提示你安装 rancher-monitoring。你也可以选择在 Rancher-monitoring 安装上设置选择器或抓取配置选项。
1. 可选:为 Istio 组件配置成员访问和[资源限制](../../../integrations-in-rancher/istio/cpu-and-memory-allocations.md)。确保你的 Worker 节点上有足够的资源来启用 Istio。
1. 可选:如果需要,对 values.yaml 进行额外的配置更改。
-1. 可选:通过[覆盖文件](../../../pages-for-subheaders/configuration-options.md#覆盖文件)来添加其他资源或配置。
+1. 可选:通过[覆盖文件](../../../integrations-in-rancher/istio/configuration-options/configuration-options.md#覆盖文件)来添加其他资源或配置。
1. 单击**安装**。
**结果**:已在集群级别安装 Istio。
## 其他配置选项
-有关配置 Istio 的更多信息,请参阅[配置参考](../../../pages-for-subheaders/configuration-options.md)。
+有关配置 Istio 的更多信息,请参阅[配置参考](../../../integrations-in-rancher/istio/configuration-options/configuration-options.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
index 3651b9d77c6..d088ee46b56 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
@@ -19,11 +19,11 @@ title: 2. 在命名空间中启用 Istio
**结果**:命名空间带有了 `istio-injection=enabled` 标签。默认情况下,部署在此命名空间中的所有新工作负载都将注入 Istio sidecar。
-### 验证是否启用了自动 Istio Sidecar 注入
+## 验证是否启用了自动 Istio Sidecar 注入
要验证 Istio 是否已启用,请在命名空间中部署一个 hello-world 工作负载。转到工作负载并单击 pod 名称。在**容器**中,你应该能看到 `istio-proxy` 容器。
-### 排除工作负载的 Istio Sidecar 注入
+## 排除工作负载的 Istio Sidecar 注入
要排除 Istio sidecar 被注入某工作负载,请在工作负载上使用以下注释:
@@ -49,5 +49,5 @@ sidecar.istio.io/inject: “false”
:::
-### 后续步骤
+## 后续步骤
[使用 Istio Sidecar 添加部署](use-istio-sidecar.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
index 9782bd28938..f143dc2cdc6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
@@ -72,5 +72,6 @@ spec:
**结果**:生成流到该服务的流量时(例如,刷新 Ingress Gateway URL),你可以在 Kiali 流量图中看到流到 `reviews` 服务的流量被平均分配到了 `v1` 和 `v3`。
-### 后续步骤
+## 后续步骤
+
[生成和查看流量](generate-and-view-traffic.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
index 2290de911ad..cae09528074 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
@@ -19,7 +19,7 @@ title: 3. 使用 Istio Sidecar 添加部署和服务
等待几分钟,然后工作负载将升级并具有 Istio sidecar。单击它并转到**容器**。你应该能看到该工作负载旁边的 `istio-proxy`。这意味着为工作负载启用了 Istio sidecar。Istio 正在为 Sidecar Envoy 做所有的接线工作。如果你现在在 yaml 中启用它们,Istio 可以自动执行所有功能。
-### 添加部署和服务
+## 添加部署和服务
以下是在命名空间中添加新 **Deployment** 的几种方法:
@@ -46,7 +46,7 @@ title: 3. 使用 Istio Sidecar 添加部署和服务
1. 如果你的文件存储在本地集群中,运行 `kubectl create -f .yaml`。
1. 或运行 `cat<< EOF | kubectl apply -f -`,将文件内容粘贴到终端,然后运行 `EOF` 来完成命令。
-### 部署和服务示例
+## 部署和服务示例
接下来,我们为 Istio 文档中的 BookInfo 应用的示例部署和服务添加 Kubernetes 资源:
@@ -87,7 +87,7 @@ Productpage 服务和部署:
- 一个 `bookinfo-productpage` 的 ServiceAccount。
- 一个 `productpage-v1` Deployment。
-### 资源 YAML
+## 资源 YAML
```yaml
# Copyright 2017 Istio Authors
@@ -356,5 +356,6 @@ spec:
---
```
-### 后续步骤
+## 后续步骤
+
[设置 Istio Gateway](set-up-istio-gateway.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
index 91578148d71..0283b228b92 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
@@ -4,18 +4,18 @@ title: Pod 安全策略
:::note
-本文介绍的集群选项仅适用于 [Rancher 已在其中启动 Kubernetes 的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+本文介绍的集群选项仅适用于 [Rancher 已在其中启动 Kubernetes 的集群](../../new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
你可以在创建项目的时候设置 Pod 安全策略(PSP)。如果在创建项目期间没有为项目分配 PSP,你也随时可以将 PSP 分配给现有项目。
-### 先决条件
+## 先决条件
- 在 Rancher 中创建 Pod 安全策略。在将默认 PSP 分配给现有项目之前,你必须有一个可分配的 PSP。有关说明,请参阅[创建 Pod 安全策略](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md)。
- 将默认 Pod 安全策略分配给项目所属的集群。如果 PSP 还没有应用到集群,你无法将 PSP 分配给项目。有关详细信息,请参阅[将 pod 安全策略添加到集群](../../new-user-guides/manage-clusters/add-a-pod-security-policy.md)。
-### 应用 Pod 安全策略
+## 应用 Pod 安全策略
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,转到需要移动命名空间的集群,然后单击 **Explore**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
index ce57b2db349..f4af6d0c85b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
@@ -14,7 +14,7 @@ title: 项目资源配额
Rancher 中的资源配额包含与 [Kubernetes 原生版本](https://kubernetes.io/docs/concepts/policy/resource-quotas/)相同的功能。Rancher 还扩展了资源配额的功能,从而让你将资源配额应用于项目。有关资源配额如何与 Rancher 中的项目一起使用的详细信息,请参阅[此页面](about-project-resource-quotas.md)。
-### 将资源配额应用于现有项目
+## 将资源配额应用于现有项目
修改资源配额的使用场景如下:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
index f29c3004b85..359dcd7695a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
@@ -12,7 +12,7 @@ title: 覆盖命名空间的默认限制
有关详细信息,请参阅[如何编辑命名空间资源配额](../../../new-user-guides/manage-clusters/projects-and-namespaces.md)。
-### 编辑命名空间资源配额
+## 编辑命名空间资源配额
如果你已为项目配置了资源配额,你可以覆盖命名空间默认限制,从而为特定命名空间提供对更多(或更少)项目资源的访问权限:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
index e56f617e7a0..1735266bd74 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
@@ -6,7 +6,7 @@ title: 设置容器默认资源限制
为了避免在创建工作负载期间对每个容器设置这些限制,可以在命名空间上指定一个默认的容器资源限制。
-### 编辑容器默认资源限制
+## 编辑容器默认资源限制
你可以在以下情况下编辑容器的默认资源限制:
@@ -19,7 +19,7 @@ title: 设置容器默认资源限制
1. 找到要编辑容器默认资源限制的项目。在该项目中选择 **⋮ > 编辑配置**。
1. 展开**容器默认资源限制**并编辑对应的值。
-### 沿用资源限制
+## 沿用资源限制
在项目级别设置默认容器资源限制后,项目中所有新建的命名空间都会沿用这个资源限制参数。新设置的限制不会影响项目中现有的命名空间。你需要为项目中的现有命名空间手动设置默认容器资源限制,以便创建容器时能应用该限制。
@@ -27,7 +27,7 @@ title: 设置容器默认资源限制
在命名空间上配置容器默认资源限制后,在该命名空间中创建的任何容器都会沿用该默认值。你可以在工作负载创建期间覆盖这些限制/预留。
-### 容器资源配额类型
+## 容器资源配额类型
可以配置以下资源限制:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
index 1ce6b33f141..5590a9e5072 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
@@ -26,7 +26,7 @@ _项目_ 是 Rancher 中引入的对象,可帮助你更有组织地管理 Kube
- [配置工具](../../../reference-guides/rancher-project-tools.md)
- [配置 Pod 安全策略](manage-pod-security-policies.md)
-### 授权
+## 授权
非管理者用户只有在[管理员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者或成员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)将非管理员用户添加到项目的**成员**选项卡后,才能获取项目的访问权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
index 5fa126a9089..264cdb3ffc8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
@@ -4,13 +4,13 @@ title: 自定义 Grafana 仪表板
在本文中,你将学习通过自定义 Grafana 仪表板来显示特定容器的指标。
-### 先决条件
+## 先决条件
在自定义 Grafana 仪表板之前,你必须先安装 `rancher-monitoring` 应用。
要查看指向外部监控 UI(包括 Grafana 仪表板)的链接,你至少需要一个 [project-member 角色](../../../integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#具有-rancher-权限的用户)。
-### 登录 Grafana
+## 登录 Grafana
1. 在 Rancher UI 中,转到要自定义的仪表板的集群。
1. 在左侧导航栏中,单击**监控**。
@@ -19,7 +19,7 @@ title: 自定义 Grafana 仪表板
1. 登录到 Grafana。Grafana 实例的默认 Admin 用户名和密码是 `admin/prom-operator`(无论谁拥有密码,都需要 Rancher 的集群管理员权限才能访问 Grafana 实例)。你还可以在部署或升级 Chart 时替换凭证。
-### 获取支持 Grafana 面板的 PromQL 查询
+## 获取支持 Grafana 面板的 PromQL 查询
对于任何面板,你可以单击标题并单击 **Explore** 以获取支持图形的 PromQL 查询。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
index a51a45041ec..40a32dbadab 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
@@ -12,7 +12,7 @@ Prometheus 经过了优化,可以存储基于索引的序列数据。它是为
但是,Prometheus 没有就快速变化的时间序列数量进行对应的优化。因此,如果你在创建和销毁了大量资源的集群(尤其是多租户集群)上安装 Monitoring,可能会出现内存使用量激增的情况。
-### 减少内存激增
+## 减少内存激增
为了减少内存消耗,Prometheus 可以通过抓取更少的指标或在时间序列上添加更少的标签,从而存储更少的时间序列。要查看使用内存最多的序列,你可以查看 Prometheus UI 中的 TSDB(时序数据库)状态页面。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
index c6bac839971..ed318fe4724 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
@@ -4,7 +4,7 @@ title: 启用 Prometheus Federator
## 要求
-默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](../../../../pages-for-subheaders/monitoring-and-alerting.md) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。
+默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](../../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。
有关安装 rancher-monitoring 的说明,请参阅[此页面](../enable-monitoring.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
index 10aaf5d95b7..ce1ad698b1b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
@@ -2,11 +2,11 @@
title: 为工作负载设置 Prometheus Federator
---
-### 显示工作负载的 CPU 和内存指标
+## 显示工作负载的 CPU 和内存指标
使用 Prometheus Federator 显示 CPU 和内存指标的方式与使用 rancher-monitoring 相同。有关说明,请参阅[此处](../set-up-monitoring-for-workloads.md#显示工作负载的-cpu-和内存指标)。
-### 设置 CPU 和内存之外的指标
+## 设置 CPU 和内存之外的指标
使用 Prometheus Federator 设置 CPU 和内存之外的指标与使用 rancher-monitoring 的方式相同。有关说明,请参阅[此处](../set-up-monitoring-for-workloads.md#设置-cpu-和内存之外的指标)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
index 7ab5dd36f79..f81aa879d86 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
@@ -12,13 +12,13 @@ Grafana 显示聚合数据,你也可以使用 PromQL 查询来查看单个工
要为你的工作负载设置自定义指标,你需要设置一个 Exporter 并创建一个新的 ServiceMonitor 自定义资源,从而将 Prometheus 配置为从 Exporter 中抓取指标。
-### 显示工作负载的 CPU 和内存指标
+## 显示工作负载的 CPU 和内存指标
默认情况下,Monitoring 应用会抓取 CPU 和内存指标。
要获取特定工作负载的细粒度信息,你可以自定义 Grafana 仪表板来显示该工作负载的指标。
-### 设置 CPU 和内存之外的指标
+## 设置 CPU 和内存之外的指标
对于自定义指标,你需要使用 Prometheus 支持的格式来公开应用上的指标。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
index 549bc6fa852..388da060f7a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
@@ -6,14 +6,14 @@ title: 高级配置
-### Alertmanager
+## Alertmanager
有关配置 Alertmanager 自定义资源的信息,请参阅[此页面。](alertmanager.md)
-### Prometheus
+## Prometheus
有关配置 Prometheus 自定义资源的信息,请参阅[此页面。](prometheus.md)
-### PrometheusRules
+## PrometheusRules
有关配置 PrometheusRules 自定义资源的信息,请参阅[此页面。](prometheusrules.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
index 458011a703c..8aab20e1b90 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
@@ -10,7 +10,7 @@ PrometheusRule 定义了一组 Prometheus 告警和/或记录规则。
:::
-### 在 Rancher UI 中创建 PrometheusRule
+## 在 Rancher UI 中创建 PrometheusRule
:::note 先决条件:
@@ -28,7 +28,7 @@ PrometheusRule 定义了一组 Prometheus 告警和/或记录规则。
**结果**:告警可以向接收器发送通知。
-### 关于 PrometheusRule 自定义资源
+## 关于 PrometheusRule 自定义资源
当你定义规则时(在 PrometheusRule 资源的 RuleGroup 中声明),[规则本身的规范](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule)会包含标签,然后 Alertmanager 会使用这些标签来确定接收此告警的路由。例如,标签为 `team: front-end` 的告警将发送到与该标签匹配的所有路由。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
index 7e43c7234b2..bc9c0e68048 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
@@ -31,7 +31,7 @@ target prot opt source destination
sudo iptables --list
```
-下文介绍如何使用 `firewalld`,将[防火墙端口规则](../../pages-for-subheaders/installation-requirements.md#端口要求)应用到高可用 Rancher Server 集群中的节点。
+下文介绍如何使用 `firewalld`,将[防火墙端口规则](../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#端口要求)应用到高可用 Rancher Server 集群中的节点。
## 先决条件
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
index 6cadea62ced..755f0837752 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
@@ -4,9 +4,9 @@ title: 为大型安装进行 etcd 调优
当你运行具有 15 个或更多集群的大型 Rancher 安装时,我们建议你扩大 etcd 的默认 keyspace(默认为 2GB)。你最大可以将它设置为 8GB。此外,请确保主机有足够的 RAM 来保存整个数据集。如果需要增加这个值,你还需要同步增加主机的大小。如果你预计在垃圾回收间隔期间 Pod 的变化率很高,你也可以在较小的安装中调整 Keyspace 大小。
-Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
+Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
-### 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
+## 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
```yaml
# RKE cluster.yml
@@ -19,7 +19,7 @@ services:
## 扩展 etcd 磁盘性能
-你可以参见 [etcd 文档](https://etcd.io/docs/v3.4.0/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
+你可以参见 [etcd 文档](https://etcd.io/docs/v3.5/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
此外,为了减少 etcd 磁盘上的 IO 争用,你可以为 data 和 wal 目录使用专用设备。etcd 最佳实践不建议配置 Mirror RAID(因为 etcd 在集群中的节点之间复制数据)。你可以使用 striping RAID 配置来增加可用的 IOPS。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/add-users-to-projects.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/add-users-to-projects.md
index 4f4f6712b0e..dfc72d07c33 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/add-users-to-projects.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/add-users-to-projects.md
@@ -12,11 +12,11 @@ title: 添加项目成员
:::
-### 将成员添加到新项目
+## 将成员添加到新项目
你可以在创建项目时将成员添加到项目中(建议)。有关创建新项目的详细信息,请参阅[集群管理](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)。
-### 将成员添加到现有项目
+## 将成员添加到现有项目
创建项目后,你可以将用户添加为项目成员,以便用户可以访问项目的资源:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
index 94b0333f481..cb1637d2bc8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
@@ -8,7 +8,7 @@ title: 配置驱动
使用 Rancher 中的驱动,你可以管理可以使用哪些供应商来部署[托管的 Kubernetes 集群](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)或[云服务器节点](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md),以允许 Rancher 部署和管理 Kubernetes。
-### Rancher 驱动
+## Rancher 驱动
你可以启用或禁用 Rancher 中内置的驱动。如果相关驱动 Rancher 尚未实现,你可以添加自己的驱动。
@@ -17,7 +17,7 @@ Rancher 中有两种类型的驱动:
* [集群驱动](#集群驱动)
* [主机驱动](#主机驱动)
-### 集群驱动
+## 集群驱动
集群驱动用于配置[托管的 Kubernetes 集群](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md),例如 GKE、EKS、AKS 等。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将显示为为托管 Kubernetes 集群创建集群的选项。默认情况下,Rancher 与几个现有的集群驱动打包在一起,但你也可以创建自定义集群驱动并添加到 Rancher。
@@ -33,7 +33,7 @@ Rancher 中有两种类型的驱动:
* [Huawei CCE](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md)
* [Tencent](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md)
-### 主机驱动
+## 主机驱动
主机驱动用于配置主机,Rancher 使用这些主机启动和管理 Kubernetes 集群。主机驱动与 [Docker Machine 驱动](https://docs.docker.com/machine/drivers/)相同。创建主机模板时可以显示的主机驱动,是由主机驱动的状态定义的。只有 `active` 主机驱动将显示为创建节点模板的选项。默认情况下,Rancher 与许多现有的 Docker Machine 驱动打包在一起,但你也可以创建自定义主机驱动并添加到 Rancher。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
index 5f9c30227ba..8a0deb1387e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
@@ -2,11 +2,11 @@
title: 集群驱动
---
-集群驱动用于在[托管 Kubernetes 提供商](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)(例如 Google GKE)中创建集群。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将作为创建集群的选项显示。默认情况下,Rancher 与多个现有的云提供商集群驱动打包在一起,但你也可以将自定义集群驱动添加到 Rancher。
+集群驱动用于在[托管 Kubernetes 提供商](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)(例如 Google GKE)中创建集群。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将作为创建集群的选项显示。默认情况下,Rancher 与多个现有的云提供商集群驱动打包在一起,但你也可以将自定义集群驱动添加到 Rancher。
如果你不想向用户显示特定的集群驱动,你可以在 Rancher 中停用这些集群驱动,它们将不会作为创建集群的选项出现。
-### 管理集群驱动
+## 管理集群驱动
:::note 先决条件:
@@ -36,7 +36,6 @@ title: 集群驱动
1. 在**集群驱动**选项卡上,单击**添加集群驱动**。
1. 填写**添加集群驱动**表单。然后单击**创建**。
-
-### 开发自己的集群驱动
+## 开发自己的集群驱动
如果要开发集群驱动并添加到 Rancher,请参考我们的[示例](https://github.com/rancher-plugins/kontainer-engine-driver-example)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
index bb49bcbe46a..f228428fdcd 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
@@ -6,7 +6,7 @@ title: 主机驱动
如果你不想向用户显示特定的主机驱动,则需要停用这些主机驱动。
-#### 管理主机驱动
+## 管理主机驱动
:::note 先决条件:
@@ -36,6 +36,6 @@ title: 主机驱动
1. 在**主机驱动**选项卡上,单击**添加主机驱动**。
1. 填写**添加主机驱动**表单。然后单击**创建**。
-### 开发自己的主机驱动
+## 开发自己的主机驱动
主机驱动使用 [Docker Machine](https://docs.docker.com/machine/) 来实现。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
index 947f2a9784c..8d7be6171d8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
@@ -22,7 +22,7 @@ title: 访问和共享
- 公开 RKE 模板,并与 Rancher 设置中的所有用户共享
- 与受信任修改模板的用户共享模板所有权
-### 与特定用户或组共享模板
+## 与特定用户或组共享模板
要允许用户或组使用你的模板创建集群,你可以为他们提供模板的基本**用户**访问权限。
@@ -36,7 +36,7 @@ title: 访问和共享
**结果**:用户或组可以使用模板创建集群。
-### 与所有用户共享模板
+## 与所有用户共享模板
1. 在左上角,单击 **☰ > 集群管理**。
1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
@@ -46,7 +46,7 @@ title: 访问和共享
**结果**:Rancher 设置中的所有用户都可以使用该模板创建集群。
-### 共享模板所有权
+## 共享模板所有权
如果你是模板的创建者,你可能希望将维护和更新模板的责任委派给其他用户或组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
index 3efac3e477e..1e20c57d100 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
@@ -10,10 +10,9 @@ RKE 模板可以应用于新集群。
你无法将集群更改为使用不同的 RKE 模板。你只能将集群更新为同一模板的新版本。
+## 使用 RKE 模板创建集群
-### 使用 RKE 模板创建集群
-
-要使用 RKE 模板添加[由基础设施提供商托管](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的集群,请按照以下步骤操作:
+要使用 RKE 模板添加[由基础设施提供商托管](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的集群,请按照以下步骤操作:
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,单击**创建**并选择基础设施提供商。
@@ -23,11 +22,11 @@ RKE 模板可以应用于新集群。
1. 可选:你可以编辑 RKE 模板所有者在创建模板时标记为**允许用户覆盖**的任何设置。如果你无法更改某些设置,则需要联系模板所有者以获取模板的新修订版。然后,你需要编辑集群来将其升级到新版本。
1. 单击**创建**以启动集群。
-### 更新使用 RKE 模板创建的集群
+## 更新使用 RKE 模板创建的集群
模板所有者创建 RKE 模板时,每个设置在 Rancher UI 中都有一个开关,指示用户是否可以覆盖该设置。
-- 如果某个设置允许用户覆盖,你可以通过[编辑集群](../../../../pages-for-subheaders/cluster-configuration.md)来更新集群中的设置。
+- 如果某个设置允许用户覆盖,你可以通过[编辑集群](../../../../reference-guides/cluster-configuration/cluster-configuration.md)来更新集群中的设置。
- 如果该开关处于关闭状态,则除非集群所有者创建了允许你覆盖这些设置的模板修订版,否则你无法更改这些设置。如果你无法更改某些设置,则需要联系模板所有者以获取模板的新修订版。
如果集群是使用 RKE 模板创建的,你可以编辑集群,来将集群更新为模板的新版本。
@@ -40,7 +39,7 @@ RKE 模板可以应用于新集群。
:::
-### 将现有集群转换为使用 RKE 模板
+## 将现有集群转换为使用 RKE 模板
本节介绍如何使用现有集群创建 RKE 模板。
@@ -56,4 +55,4 @@ RKE 模板可以应用于新集群。
- 创建了一个新的 RKE 模板。
- 将集群转换为使用该新模板。
-- 可以[使用新模板创建新集群](apply-templates.md#使用-rke-模板创建集群)。
\ No newline at end of file
+- 可以[使用新模板创建新集群](#使用-rke-模板创建集群)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
index a3cb27d9b6b..cce638a21d2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
@@ -8,11 +8,11 @@ title: RKE 模板和基础设施
如果要标准化集群中的硬件,请将 RKE 模板与节点模板或服务器配置工具 (如 Terraform) 结合使用。
-### 节点模板
+## 节点模板
[节点模板](../../../../reference-guides/user-settings/manage-node-templates.md)负责 Rancher 中的节点配置和节点预配。你可以在用户配置文件中设置节点模板,从而定义在每个节点池中使用的模板。启用节点池后,可以确保每个节点池中都有所需数量的节点,并确保池中的所有节点都相同。
-### Terraform
+## Terraform
Terraform 是一个服务器配置工具。它使用基础架构即代码,支持使用 Terraform 配置文件创建几乎所有的基础设施。它可以自动执行服务器配置,这种方式是自文档化的,并且在版本控制中易于跟踪。
@@ -21,14 +21,13 @@ Terraform 是一个服务器配置工具。它使用基础架构即代码,支
Terraform 支持:
- 定义几乎任何类型的基础架构即代码,包括服务器、数据库、负载均衡器、监控、防火墙设置和 SSL 证书
-- 使用应用商店应用和多集群应用
- 跨多个平台(包括 Rancher 和主要云提供商)对基础设施进行编码
- 将基础架构即代码提交到版本控制
- 轻松重复使用基础设施的配置和设置
- 将基础架构更改纳入标准开发实践
- 防止由于配置偏移,导致一些服务器的配置与其他服务器不同
-## Terraform 工作原理
+### Terraform 工作原理
Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配置语言编写的。HashiCorp 配置语言是一种声明性语言,支持定义集群中所需的基础设施、正在使用的云提供商以及提供商的凭证。然后 Terraform 向提供商发出 API 调用,以便有效地创建基础设施。
@@ -38,7 +37,7 @@ Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配
如果你需要对基础设施进行更改,你可以在 Terraform 配置文件中进行更改,而不是手动更新服务器。然后,可以将这些文件提交给版本控制、验证,并根据需要进行检查。然后,当你运行 `terraform apply` 时,更改将会被部署。
-## 使用 Terraform 的技巧
+### 使用 Terraform 的技巧
- [Rancher 2 提供商文档](https://www.terraform.io/docs/providers/rancher2/)提供了如何配置集群大部分的示例。
@@ -54,7 +53,7 @@ Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配
本节描述了一种方法,可以使安全合规相关的配置文件成为集群的标准配置文件。
-在你创建[符合 CIS 基准的集群](../../../../pages-for-subheaders/rancher-security.md)时,你有一个加密配置文件和一个审计日志配置文件。
+在你创建[符合 CIS 基准的集群](../../../../reference-guides/rancher-security/rancher-security.md)时,你有一个加密配置文件和一个审计日志配置文件。
你的基础设施预配系统可以将这些文件写入磁盘。然后在你的 RKE 模板中,你需要指定这些文件的位置,然后将你的加密配置文件和审计日志配置文件作为额外的挂载添加到 `kube-api-server`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
index 6e4adb98239..0e2393f74c4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
@@ -10,14 +10,13 @@ title: 创建和修改 RKE 模板
模板所有者对模板修订版具有完全控制权,并且可以创建新的修订版来更新模板,删除或禁用不应被用于创建集群的修订版,和设置默认的模板修订版。
-
-### 先决条件
+## 先决条件
如果你具有**创建 RKE 模板**权限,则可以创建 RKE 模板,该权限可由[管理员授予](creator-permissions.md)。
如果你是模板的所有者,你可以修改、共享和删除模板。有关如何成为模板所有者的详细信息,请参阅[共享模板所有权文档](access-or-share-templates.md#共享模板所有权)。
-### 创建模板
+## 创建模板
1. 在左上角,单击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -26,9 +25,9 @@ title: 创建和修改 RKE 模板
1. 可选:通过将用户添加为成员,来[与其他用户或组共享模板](access-or-share-templates.md#与特定用户或组共享模板)。你还可以将模板公开,从而与 Rancher 中的所有人共享。
1. 然后按照屏幕上的表格将集群配置参数保存为模板修订的一部分。可以将修订标记为此模板的默认值。
-**结果**:配置了具有一个修订版的 RKE 模板。你可以稍后在[配置 Rancher 启动的集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)时使用此 RKE 模板修订版。通过 RKE 模板管理集群后,集群无法解除与模板的绑定,并且无法取消选中**使用现有 RKE 模板和修订版**。
+**结果**:配置了具有一个修订版的 RKE 模板。你可以稍后在[配置 Rancher 启动的集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)时使用此 RKE 模板修订版。通过 RKE 模板管理集群后,集群无法解除与模板的绑定,并且无法取消选中**使用现有 RKE 模板和修订版**。
-### 更新模板
+## 更新模板
更新 RKE 模板相当于创建现有模板的修订版。使用旧版本模板创建的集群可以进行更新,从而匹配新版本。
@@ -44,7 +43,7 @@ title: 创建和修改 RKE 模板
**结果**:模板已更新。要将其应用到使用旧版本模板的集群,请参阅[升级集群以使用新的模板修订版](#升级集群以使用新的模板修订版)。
-### 删除模板
+## 删除模板
当不再需要为任何集群使用某个 RKE 模板时,可以将其删除。
@@ -55,7 +54,7 @@ title: 创建和修改 RKE 模板
**结果**:模板被删除。
-### 基于默认版创建新修订版
+## 基于默认版创建新修订版
你可以复制默认模板修订版并快速更新其设置,而无需从头开始创建新修订版。克隆模板为你省去了重新输入集群创建所需的访问密钥和其他参数的麻烦。
@@ -66,7 +65,7 @@ title: 创建和修改 RKE 模板
**结果**:克隆并配置了 RKE 模板修订版。
-### 基于克隆版创建新修订版
+## 基于克隆版创建新修订版
通过用户设置创建新的 RKE 模板修订版时,可以克隆现有修订版并快速更新其设置,而无需从头开始创建新的修订版。克隆模板修订省去了重新输入集群参数的麻烦。
@@ -77,7 +76,7 @@ title: 创建和修改 RKE 模板
**结果**:克隆并配置了 RKE 模板修订版。你可以在配置集群时使用 RKE 模板修订。任何使用此 RKE 模板的现有集群都可以升级到此新版本。
-### 禁用模板修订版
+## 禁用模板修订版
当你不需要将 RKE 模板修订版本用于创建新集群时,可以禁用模板修订版。你也可以重新启用禁用了的修订版。
@@ -89,7 +88,7 @@ title: 创建和修改 RKE 模板
**结果**:RKE 模板修订版不能用于创建新集群。
-### 重新启用禁用的模板修订版
+## 重新启用禁用的模板修订版
如果要使用已禁用的 RKE 模板修订版来创建新集群,你可以重新启用该修订版。
@@ -99,7 +98,7 @@ title: 创建和修改 RKE 模板
**结果**:RKE 模板修订版可用于创建新集群。
-### 将模板修订版设置为默认
+## 将模板修订版设置为默认
当最终用户使用 RKE 模板创建集群时,他们可以选择使用哪个版本来创建集群。你可以配置默认使用的版本。
@@ -111,7 +110,7 @@ title: 创建和修改 RKE 模板
**结果**:使用模板创建集群时,RKE 模板修订版将用作默认选项。
-### 删除模板修订版
+## 删除模板修订版
你可以删除模板的所有修订(默认修订除外)。
@@ -123,7 +122,7 @@ title: 创建和修改 RKE 模板
**结果**:RKE 模板修订版被删除。
-### 升级集群以使用新的模板修订版
+## 升级集群以使用新的模板修订版
:::note
@@ -142,7 +141,7 @@ title: 创建和修改 RKE 模板
**结果**:集群已升级为使用新模板修订版中定义的设置。
-### 将正在运行的集群导出到新的 RKE 模板和修订版
+## 将正在运行的集群导出到新的 RKE 模板和修订版
你可以将现有集群的设置保存为 RKE 模板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
index b9997d6e7b4..9d2a9590372 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
@@ -42,7 +42,7 @@ Rancher 认证代理可以与以下外部认证服务集成。
## 用户和组
-Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目、多集群应用以及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
+Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
:::note
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
index 704c47a68c5..e6ba881dc5a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
@@ -4,11 +4,11 @@ title: 配置 Active Directory (AD)
如果你的组织使用 Microsoft Active Directory 作为中心用户仓库,你可以将 Rancher 配置为与 Active Directory 服务器通信,从而对用户进行身份验证。这使 Rancher 管理员可以对外部用户系统中的用户和组进行集群和项目的访问控制,同时允许最终用户在登录 Rancher UI 时使用 Active Directory 凭证进行身份验证。
-Rancher 使用 LDAP 与 Active Directory 服务器通信。因此,Active Directory 与 [OpenLDAP 身份验证](../../../../pages-for-subheaders/configure-openldap.md)的流程相同。
+Rancher 使用 LDAP 与 Active Directory 服务器通信。因此,Active Directory 与 [OpenLDAP 身份验证](../configure-openldap/configure-openldap.md)的流程相同。
:::note
-在开始之前,请熟悉[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)的概念。
+在开始之前,请熟悉[外部身份验证配置和主体用户](./authentication-config.md#外部认证配置和用户主体)的概念。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
index 77d51e007bf..45c5a5d12cb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
@@ -43,7 +43,6 @@ Rancher 中的 Microsoft Graph API 流程正在不断发展。建议你使用最

1. 输入 **Name**(例如 `Rancher`)。
-
1. 在 **Supported account types** 中,选择 **Accounts in this organizational directory only (AzureADTest only - Single tenant)**。这对应于旧版应用注册选项。
@@ -260,7 +259,7 @@ Rancher 未测试也未完全支持自定义端点。
#### 离线环境
-在离线环境中,由于 Graph Endpoint URL 正在更改,因此管理员需要确保其端点被[列入白名单](#3.2)。
+在离线环境中,由于 Graph Endpoint URL 正在更改,因此管理员需要确保其端点被[列入白名单](#1-在-azure-注册-rancher)。
#### 回滚迁移
@@ -322,5 +321,5 @@ Rancher 未测试也未完全支持自定义端点。
>
> - 如果你不想在 Azure AD Graph API 停用后升级到 v2.7.0+,你需要:
> - 使用内置的 Rancher 身份认证,或者
-> - 使用另一个第三方身份认证系统并在 Rancher 中进行设置。请参阅[身份验证文档](../../../../pages-for-subheaders/authentication-config.md),了解如何配置其他开放式身份验证提供程序。
+> - 使用另一个第三方身份认证系统并在 Rancher 中进行设置。请参阅[身份验证文档](./authentication-config.md),了解如何配置其他开放式身份验证提供程序。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
index 6f3100e462c..eab27ffb571 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
@@ -8,7 +8,7 @@ title: 配置 FreeIPA
- 你必须配置了 [FreeIPA 服务器](https://www.freeipa.org/)。
- 在 FreeIPA 中创建一个具有 `read-only` 访问权限的 ServiceAccount 。当用户使用 API 密钥发出请求时,Rancher 使用此账号来验证组成员身份。
-- 参见[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+- 参见[外部身份验证配置和主体用户](./authentication-config.md#外部认证配置和用户主体)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
index e903421611d..59eb79748fe 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
@@ -6,7 +6,7 @@ title: 配置 GitHub
:::note 先决条件:
-参见[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+参见[外部身份验证配置和主体用户](./authentication-config.md#外部认证配置和用户主体)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
index 212f7cf45d1..4b9daebde82 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
@@ -96,7 +96,7 @@ OpenLDAP ServiceAccount 用于所有搜索。无论用户个人的 SAML 权限
[配置 OpenLDAP Server、组和用户的设置](../configure-openldap/openldap-config-reference.md)。请注意,不支持嵌套组成员。
-> 在继续配置之前,请熟悉[外部身份认证配置和主要用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+> 在继续配置之前,请熟悉[外部身份认证配置和主要用户](./authentication-config.md#外部认证配置和用户主体)。
1. 使用分配了 [administrator](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions) 角色(即 _本地主体_)的本地用户登录到 Rancher。
1. 在左上角,单击 **☰ > 用户 & 认证**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
index b44cc440312..72dda0595a4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
@@ -4,7 +4,7 @@ title: 用户和组
Rancher 依赖用户和组来决定允许登录到 Rancher 的用户,以及他们可以访问哪些资源。你配置外部身份验证提供程序后,该提供程序的用户将能够登录到你的 Rancher Server。用户登录时,验证提供程序将向你的 Rancher Server 提供该用户所属的组列表。
-你可以通过向资源添加用户或组,来控制其对集群、项目、多集群应用、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
+你可以通过向资源添加用户或组,来控制其对集群、项目、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
## 管理成员
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
index 83bcfee396b..ed1dcf5df1f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
@@ -83,6 +83,6 @@ https:///federationmetadata/2007-06/federationmetadata.xml
**结果**:你已将 Rancher 添加为依赖信任方。现在你可以配置 Rancher 来使用 AD。
-### 后续操作
+## 后续操作
[在 Rancher 中配置 Microsoft AD FS ](configure-rancher-for-ms-adfs.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
index 26e89e2058d..325f86ca45b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
@@ -41,17 +41,13 @@ title: 2. 在 Rancher 中配置 Microsoft AD FS
| UID 字段 | 每个用户独有的 AD 属性。 示例:`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` |
| 用户组字段 | 创建用于管理组成员关系的条目。 示例:`http://schemas.xmlsoap.org/claims/Group` |
| Rancher API 主机 | Rancher Server 的 URL。 |
-| 私钥/证书 | 在 Rancher 和你的 AD FS 之间创建安全外壳(SSH)的密钥/证书对。确保将 Common Name (CN) 设置为 Rancher Server URL。 [证书创建命令](#cert-command) |
+| 私钥/证书 | 在 Rancher 和你的 AD FS 之间创建安全外壳(SSH)的密钥/证书对。确保将 Common Name (CN) 设置为 Rancher Server URL。 [证书创建命令](#example-certificate-creation-command) |
| 元数据 XML | 从 AD FS 服务器导出的 `federationmetadata.xml` 文件。 你可以在 `https:///federationmetadata/2007-06/federationmetadata.xml` 找到该文件。 |
-
-
-:::tip
+### Example Certificate Creation Command
你可以使用 openssl 命令生成证书。例如:
```
openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
```
-
-:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
index 7594371a296..652e2457f37 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
@@ -53,4 +53,4 @@ title: 配置 OpenLDAP
## 附录:故障排除
-如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#how-can-i-enable-debug-logging)。
+如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
index 4321f1e6765..ab226898b81 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
@@ -10,14 +10,14 @@ title: Shibboleth 和 OpenLDAP 的组权限
你可以通过配置 OpenLDAP 来解决这个问题。如果让 Shibboleth 使用 OpenLDAP 后端,你将能够在 Rancher 中搜索组,并从 Rancher UI 将集群、项目或命名空间等资源分配给用户组。
-### 名词解释
+## 名词解释
- **Shibboleth**:用于计算机网络和互联网的单点登录系统。它允许用户仅使用一种身份登录到各种系统。它验证用户凭证,但不单独处理组成员身份。
- **SAML**:安全声明标记语言(Security Assertion Markup Language),用于在身份提供程序和服务提供商之间交换认证和授权数据的开放标准。
- **OpenLDAP**:轻型目录访问协议(LDAP)的免费开源实现。它用于管理组织的计算机和用户。OpenLDAP 对 Rancher 用户很有用,因为它支持组。只要组已存在于身份提供程序中,你就可以在 Rancher 中为组分配权限,从而让组访问资源(例如集群,项目或命名空间)。
- **IdP 或 IDP**:身份提供程序。OpenLDAP 是身份提供程序的一个例子。
-### 将 OpenLDAP 组权限添加到 Rancher 资源
+## 将 OpenLDAP 组权限添加到 Rancher 资源
下图说明了 OpenLDAP 组的成员如何访问 Rancher 中该组有权访问的资源。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
index 285a5d3e6aa..fe0c5cde49b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
@@ -101,4 +101,4 @@ SAML 协议不支持用户或用户组的搜索或查找。因此,如果你没
## 故障排除
-如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#how-can-i-enable-debug-logging)。
+如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
index 1136fdabc91..927291ced0b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
@@ -56,9 +56,9 @@ Rancher 内置了三个默认 Pod 安全策略 (PSP),分别是 `restricted-nor
### 要求
-Rancher 只能为[使用 RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)分配 PSP。
+Rancher 只能为[使用 RKE 启动的集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)分配 PSP。
-你必须先在集群级别启用 PSP,然后才能将它们分配给项目。这可以通过[编辑集群](../../../pages-for-subheaders/cluster-configuration.md)来配置。
+你必须先在集群级别启用 PSP,然后才能将它们分配给项目。这可以通过[编辑集群](../../../reference-guides/cluster-configuration/cluster-configuration.md)来配置。
最好的做法是在集群级别设置 PSP。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
index 818ccfc025a..bb7a5210a58 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
@@ -5,7 +5,7 @@ title: 配置全局默认私有镜像仓库
:::note
本页介绍了安装 Rancher 后如何从 Rancher UI 配置全局默认私有镜像仓库。
-有关如何在 Rancher 安装期间设置私有镜像仓库的说明,请参阅[离线安装指南](../../../pages-for-subheaders/air-gapped-helm-cli-install.md)。
+有关如何在 Rancher 安装期间设置私有镜像仓库的说明,请参阅[离线安装指南](../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
index 09b1cec4990..8f293098c0c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
@@ -9,7 +9,7 @@ title: 集群和项目角色
1. 单击 **☰ > 用户 & 认证**。
1. 在左侧导航栏中,单击**角色**并转到**集群**或**项目或命名空间**选项卡。
-### 成员资格和角色分配
+## 成员资格和角色分配
非管理用户可以访问的项目和集群由 _成员资格_ 决定。成员资格是根据该集群或项目中分配的角色而有权访问特定集群或项目的用户列表。每个集群和项目都包含一个选项卡,具有适当权限的用户可以使用该选项卡来管理成员资格。
@@ -21,7 +21,7 @@ title: 集群和项目角色
:::
-### 集群角色
+## 集群角色
_集群角色_ 是你可以分配给用户的角色,以授予他们对集群的访问权限。集群的两个主要角色分别是`所有者`和`成员`。
@@ -33,11 +33,11 @@ _集群角色_ 是你可以分配给用户的角色,以授予他们对集群
可以查看大多数集群级别的资源并创建新项目。
-#### 自定义集群角色
+### 自定义集群角色
Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典型的`所有者`或`成员`角色。这些角色可以是内置的自定义集群角色,也可以是 Rancher 管理员定义的角色。这些角色便于为集群内的普通用户定义更受限或特定的访问权限。有关内置自定义集群角色的列表,请参阅下表。
-#### 集群角色参考
+### 集群角色参考
下表列出了可用的内置自定义集群角色,以及默认的集群级别角色`集群所有者`和`集群成员`是否包含该权限:
@@ -54,7 +54,7 @@ Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典
| 查看集群成员 | ✓ | ✓ |
| 查看节点 | ✓ | ✓ |
-#### 管理节点权限
+### 管理节点权限
下表列出了 RKE 和 RKE2 中`管理节点`角色可用的权限:
@@ -79,7 +79,7 @@ Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典
:::
-### 为集群成员提供自定义集群角色
+## 为集群成员提供自定义集群角色
在管理员[设置自定义集群角色后](custom-roles.md),集群所有者和管理员可以将这些角色分配给集群成员。
@@ -121,7 +121,7 @@ Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典
**结果**:成员具有所分配的角色。
-### 项目角色
+## 项目角色
_项目角色_ 是用于授予用户访问项目权限的角色。主要的项目角色分别是`所有者`、`成员`和`只读`。
@@ -149,11 +149,11 @@ _项目角色_ 是用于授予用户访问项目权限的角色。主要的项
:::
-#### 自定义项目角色
+### 自定义项目角色
Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典型的`所有者`、`成员`或`只读`角色。这些角色可以是内置的自定义项目角色,也可以是 Rancher 管理员定义的角色。这些角色便于为项目内的普通用户定义更受限或特定的访问权限。有关内置自定义项目角色的列表,请参阅下表。
-#### 项目角色参考
+### 项目角色参考
下表列出了 Rancher 中可用的内置自定义项目角色,以及这些角色是否由`所有者`,`成员`或`只读`角色授予的:
@@ -187,12 +187,12 @@ Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典
:::
-### 定义自定义角色
+## 定义自定义角色
如前所述,你可以定义自定义角色,并将这些角色用在集群或项目中。上下文字段定义了角色是否显示在集群成员页面、项目成员页面或同时显示在这两个页面。
定义自定义角色时,你可以授予对特定资源的访问权限,或指定自定义角色应继承的角色。自定义角色可以由特定授权和继承角色组成。所有授权都是累加的。换言之,如果你为特定资源定义更受限的授权,自定义角色继承的角色中定义的更广泛的授权**不会**被覆盖。
-### 默认集群和项目角色
+## 默认集群和项目角色
默认情况下,在普通用户创建新集群或项目时,他们会自动分配到所有者的角色,即[集群所有者](#集群角色)或[项目所有者](#项目角色)。但是,在某些组织中,这些角色可能会被认为有过多的管理访问权限。在这种情况下,你可以将默认角色更改为更具限制性的角色,例如一组单独的角色或一个自定义角色。
@@ -211,7 +211,7 @@ Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典
:::
-### 为集群和项目创建者配置默认角色
+## 为集群和项目创建者配置默认角色
你可以更改为创建集群或项目的用户自动创建的角色:
@@ -226,7 +226,7 @@ Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典
如果要删除默认角色,请编辑权限,并在默认角色选项中选择**否**。
-### 撤销集群成员资格
+## 撤销集群成员资格
如果你撤销一个普通用户的集群成员资格,而且该用户已显式分配集群的集群 _和_ 项目的成员资格,该普通用户将[失去集群角色](#集群角色)但[保留项目角色](#项目角色)。换句话说,即使你已经撤销了用户访问集群和其中的节点的权限,但该普通用户仍然可以:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
index 0ab8560983b..80d740addcc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
@@ -98,7 +98,7 @@ title: 自定义角色
只有在以下情况下,你才能将全局角色分配给组:
-* 你已设置[外部身份验证提供程序](../../../../pages-for-subheaders/authentication-config.md#外部验证与本地验证)。
+* 你已设置[外部身份验证提供程序](../authentication-config/authentication-config.md#外部认证与本地认证)。
* 外部身份验证提供程序支持[用户组](../../authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md)。
* 你已使用身份验证提供程序设置了至少一个用户组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
index 7f7606413f1..7eb2cd0457e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
@@ -254,7 +254,7 @@ inheritedClusterRoles:
只有在以下情况下,你才能将全局角色分配给组:
-- 你已设置[外部认证](../authentication-config/authentication-config.md#external-vs-local-authentication)
+- 你已设置[外部认证](../authentication-config/authentication-config.md#外部认证与本地认证)
- 外部认证服务支持[用户组](../authentication-config/manage-users-and-groups.md)
- 你已使用外部认证服务设置了至少一个用户组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
index 1f56b62762f..7c16ac10192 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
@@ -15,12 +15,12 @@ PSS 定义了工作负载的安全级别。PSA 描述了 Pod 安全上下文和
必须在删除 PodSecurityPolicy 对象_之前_添加新的策略执行机制。否则,你可能会为集群内的特权升级攻击创造机会。
:::
-### 从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies {#remove-psp-rancher-workloads}
+### 从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies
Rancher v2.7.2 提供了 Rancher 维护的 Helm Chart 的新主要版本。v102.x.y 允许你删除与以前的 Chart 版本一起安装的 PSP。这个新版本使用标准化的 `global.cattle.psp.enabled` 开关(默认关闭)替换了非标准的 PSP 开关。
你必须在_仍使用 Kubernetes v1.24_ 时执行以下步骤:
-1. 根据需要配置 PSA 控制器。你可以使用 Rancher 的内置 [PSA 配置模板](#psa-config-templates),或创建自定义模板并将其应用于正在迁移的集群。
+1. 根据需要配置 PSA 控制器。你可以使用 Rancher 的内置 [PSA 配置模板](#pod-安全准入配置模板),或创建自定义模板并将其应用于正在迁移的集群。
1. 将活动的 PSP 映射到 Pod 安全标准:
1. 查看集群中哪些 PSP 仍处于活动状态:
@@ -108,14 +108,14 @@ Helm 尝试在集群中查询存储在先前版本的数据 blob 中的对象时
#### 将 Chart 升级到支持 Kubernetes v1.25 的版本
-清理了具有 PSP 的所有版本后,你就可以继续升级了。对于 Rancher 维护的工作负载,请按照本文档[从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies](#remove-psp-rancher-workloads) 部分中的步骤进行操作。
+清理了具有 PSP 的所有版本后,你就可以继续升级了。对于 Rancher 维护的工作负载,请按照本文档[从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies](#从-rancher-维护的应用程序和市场工作负载中删除-podsecuritypolicies) 部分中的步骤进行操作。
如果工作负载不是由 Rancher 维护的,请参阅对应的提供商的文档。
:::caution
不要跳过此步骤。与 Kubernetes v1.25 不兼容的应用程序不能保证在清理后正常工作。
:::
-## Pod 安全准入配置模板 {#psa-config-templates}
+## Pod 安全准入配置模板
Rancher 提供了 PSA 配置模板。它们是可以应用到集群的预定义安全配置。Rancher 管理员(或具有权限的人员)可以[创建、管理和编辑](./psa-config-templates.md) PSA 模板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
index c3b4d6252f4..5089f8e11d7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
@@ -32,7 +32,7 @@ title: Pod 安全准入 (PSA) 配置模板
### 加固集群
-如果选择 **rancher-restricted** 模板但不选择 **CIS 配置文件**,你将无法满足 CIS Benchmark。有关详细信息,请参阅 [RKE2 加固指南](../../../pages-for-subheaders/rke2-hardening-guide.md)。
+如果选择 **rancher-restricted** 模板但不选择 **CIS 配置文件**,你将无法满足 CIS Benchmark。有关详细信息,请参阅 [RKE2 加固指南](../../../reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-hardening-guide.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
index 3ba17f6ed64..6e574bd90dc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
@@ -2,7 +2,7 @@
title: 备份集群
---
-在 Rancher UI 中,你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。
+在 Rancher UI 中,你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的 etcd。
Rancher 建议为所有生产集群配置定期 `etcd` 快照。此外,你还可以创建单次快照。
@@ -161,7 +161,7 @@ Rancher 在创建 RKE2 或 K3s 集群的快照时,快照名称是基于快照
选择创建定期快照的频率以及要保留的快照数量。时间的单位是小时。用户可以使用时间戳快照进行时间点恢复。
-默认情况下,[Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)会配置为创建定期快照(保存到本地磁盘)。为防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
+默认情况下,[Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)会配置为创建定期快照(保存到本地磁盘)。为防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
在集群配置或编辑集群期间,可以在**集群选项**的高级部分中找到快照的配置。点击**显示高级选项**。
@@ -179,7 +179,7 @@ Rancher 在创建 RKE2 或 K3s 集群的快照时,快照名称是基于快照
设置创建定期快照的方式以及要保留的快照数量。该计划采用传统的 Cron 格式。保留策略规定了在每个节点上要保留的匹配名称的快照数量。
-默认情况下,[Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)从凌晨 12 点开始每 5 小时创建一次定期快照(保存到本地磁盘)。为了防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
+默认情况下,[Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)从凌晨 12 点开始每 5 小时创建一次定期快照(保存到本地磁盘)。为了防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
在集群配置或编辑集群期间,你可以在**集群配置**下找到快照配置。单击 **etcd**。
@@ -244,12 +244,12 @@ Rancher 支持两种不同的备份目标:
-默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会在本地自动保存到 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中 etcd 节点的 `/opt/rke/etcd-snapshots` 中。所有定期快照都是按照配置的时间间隔创建的。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
+默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会在本地自动保存到 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中 etcd 节点的 `/opt/rke/etcd-snapshots` 中。所有定期快照都是按照配置的时间间隔创建的。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
-默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会自动保存到 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中的本地 etcd 节点上的 `/var/lib/rancher//server/db/snapshots` 中,其中 `` 可以是 `k3s` 或 `rke2`。所有定期快照均按照 Cron 计划进行。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
+默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会自动保存到 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中的本地 etcd 节点上的 `/var/lib/rancher//server/db/snapshots` 中,其中 `` 可以是 `k3s` 或 `rke2`。所有定期快照均按照 Cron 计划进行。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
index cf023f56e0a..b113fda1c5e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
@@ -14,13 +14,13 @@ title: 备份 Rancher
:::
-### 先决条件
+## 先决条件
Rancher 必须是 2.5.0 或更高版本。
请参见[此处](migrate-rancher-to-new-cluster.md#2-使用-restore-自定义资源来还原备份)获取在 Rancher 2.6.3 中将现有备份文件恢复到 v1.22 集群的帮助。
-### 1. 安装 Rancher Backup Operator
+## 1. 安装 Rancher Backup Operator
备份存储位置是 operator 级别的设置,所以需要在安装或升级 `rancher backup` 应用时进行配置。
@@ -36,11 +36,11 @@ Rancher 必须是 2.5.0 或更高版本。
:::note
-使用 `backup-restore` operator 执行恢复后,Fleet 中会出现一个已知问题:用于 `clientSecretName` 和 `helmSecretName` 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../deploy-apps-across-clusters/fleet.md#故障排除)获得解决方法。
+使用 `backup-restore` operator 执行恢复后,Fleet 中会出现一个已知问题:用于 `clientSecretName` 和 `helmSecretName` 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../../../integrations-in-rancher/fleet/overview.md#故障排除)获得解决方法。
:::
-### 2. 执行备份
+## 2. 执行备份
要执行备份,必须创建 Backup 类型的自定义资源。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
index 5aa5ac6e523..821a713e62c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
@@ -4,7 +4,7 @@ title: 将 Rancher 迁移到新集群
如果你要将 Rancher 迁移到一个新的 Kubernetes 集群,先不要在新集群上安装 Rancher。这是因为如果将 Rancher 还原到已安装 Rancher 的新集群,可能会导致问题。
-### 先决条件
+## 先决条件
以下说明假设你已经完成[备份创建](back-up-rancher.md),并且已经安装了用于部署 Rancher 的新 Kubernetes 集群。
@@ -21,7 +21,7 @@ Rancher 可以安装到任意 Kubernetes 集群上,包括托管的 Kubernetes
- [RKE Kubernetes 安装文档](https://rancher.com/docs/rke/latest/en/installation/)
- [K3s Kubernetes 安装文档](https://rancher.com/docs/k3s/latest/en/installation/)
-### 1. 安装 rancher-backup Helm Chart
+## 1. 安装 rancher-backup Helm Chart
安装 [rancher-backup chart](https://github.com/rancher/backup-restore-operator/tags),请使用 2.x.x 主要版本内的版本:
1. 添加 helm 仓库:
@@ -55,7 +55,7 @@ Rancher 可以安装到任意 Kubernetes 集群上,包括托管的 Kubernetes
:::
-### 2. 使用 Restore 自定义资源来还原备份
+## 2. 使用 Restore 自定义资源来还原备份
:::note 重要提示:
@@ -150,11 +150,11 @@ Kubernetes v1.22 是 Rancher 2.6.3 的实验功能,不支持使用 apiVersion
1. Restore 资源的状态变成 `Completed` 后,你可以继续安装 cert-manager 和 Rancher。
-### 3. 安装 cert-manager
+## 3. 安装 cert-manager
-按照在 Kubernetes 上安装 cert-manager的步骤[安装 cert-manager](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。
+按照在 Kubernetes 上安装 cert-manager的步骤[安装 cert-manager](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。
-### 4. 使用 Helm 安装 Rancher
+## 4. 使用 Helm 安装 Rancher
使用与第一个集群上使用的相同版本的 Helm 来安装 Rancher:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
index 97e23f81446..fc8066fe879 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
@@ -2,7 +2,7 @@
title: 使用备份恢复集群
---
-你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。
+你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。
Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcdrke)。
@@ -126,4 +126,4 @@ Rancher UI 中提供了集群所有可用快照的列表:
5. 运行修改后的命令。
-6. 在单个节点启动并运行后,Rancher 建议向你的集群添加额外的 etcd 节点。如果你有一个[自定义集群](../../../pages-for-subheaders/use-existing-nodes.md),并且想要复用旧节点,则需要先[清理节点](../manage-clusters/clean-cluster-nodes.md),然后再尝试将它们重新添加到集群中。
+6. 在单个节点启动并运行后,Rancher 建议向你的集群添加额外的 etcd 节点。如果你有一个[自定义集群](../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md),并且想要复用旧节点,则需要先[清理节点](../manage-clusters/clean-cluster-nodes.md),然后再尝试将它们重新添加到集群中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
index eeb6cb521f0..0a5eba5a19a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
@@ -21,7 +21,7 @@ title: 还原 Rancher
:::
-### 创建 Restore 自定义资源
+## 创建 Restore 自定义资源
还原是通过创建 Restore 自定义资源实现的。
@@ -60,7 +60,7 @@ title: 还原 Rancher
2. 集群范围资源
3. 命名空间资源
-### 日志
+## 日志
如需查看还原的处理方式,请检查 Operator 的日志。查看日志的命令如下:
@@ -68,11 +68,11 @@ title: 还原 Rancher
kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f
```
-### 清理
+## 清理
如果你使用 kubectl 创建了 Restore 资源,请删除该资源以防止与未来的还原发生命名冲突。
-### 已知问题
+## 已知问题
在某些情况下,恢复备份后,Rancher 日志会显示类似以下的错误:
```
2021/10/05 21:30:45 [ERROR] error syncing 'c-89d82/m-4067aa68dd78': handler rke-worker-upgrader: clusters.management.cattle.io "c-89d82" not found, requeuing
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md
deleted file mode 100644
index 4d4d465d136..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/deploy-apps-across-clusters.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: 跨集群部署应用
----
-
-
-
-
-
-不同版本的 Rancher 提供了几种不同的方式来部署跨集群应用。
-
-## Fleet
-
-Rancher v2.5 及更高版本使用 Fleet 跨集群部署应用
-
-使用 Fleet 的持续交付是大规模的 GitOps。如需更多信息,请参阅 [Fleet](fleet.md)。
-
-### 多集群应用
-
-在 v2.5 之前的 Rancher 中,多集群应用功能用于跨集群部署应用。多集群应用功能已弃用,但仍可作为旧版功能使用。
-
-详情请参阅[此文档](multi-cluster-apps.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md
deleted file mode 100644
index def223a8337..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: 使用 Feet 进行持续交付
----
-
-使用 Fleet 的持续交付是大规模的 GitOps。你可以使用 Fleet 管理多达一百万个集群。Fleet 非常轻量,可以很好地用于[单个集群](https://fleet.rancher.io/installation#default-install),但是在你达到[大规模](https://fleet.rancher.io/installation#configuration-for-multi-cluster)时,它能发挥更强的实力。此处的大规模指的是大量集群、大量部署、或组织中存在大量团队的情况。
-
-Fleet 是一个独立于 Rancher 的项目,你可以使用 Helm 将它安装在任何 Kubernetes 集群上。
-
-
-## 架构
-
-有关 Fleet 工作原理的信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/architecture.md)。
-
-## 在 Rancher UI 中访问 Fleet
-
-Fleet 预装在 Rancher 中,通过 Rancher UI 中的**持续交付**选项管理。有关持续交付和 Fleet 故障排除技巧的更多信息,请参阅[此处](https://fleet.rancher.io/troubleshooting)。
-
-用户可以通过遵循 **gitops** 的实践,利用持续交付将应用部署到 git 仓库中的 Kubernetes 集群,而无需任何手动操作。
-
-按照以下步骤在 Rancher UI 中访问持续交付:
-
-1. 单击 **☰ > 持续交付**。
-
-1. 在菜单顶部选择你的命名空间,注意以下几点:
- - 默认情况下会选中 `fleet-default`,其中包括注册到 Rancher 的所有下游集群。
- - 你可以切换到仅包含 `local` 集群的 `fleet-local`,或者创建自己的工作空间,并将集群分配和移动到该工作空间。
- - 然后,你可以单击左侧导航栏上的**集群**来管理集群。
-
-1. 单击左侧导航栏上的 **Git 仓库**将 git 仓库部署到当前工作空间中的集群中。
-
-1. 选择你的 [git 仓库](https://fleet.rancher.io/gitrepo-add)和[目标集群/集群组](https://fleet.rancher.io/gitrepo-targets)。你还可以单击左侧导航栏中的**集群组**在 UI 中创建集群组。
-
-1. 部署 git 仓库后,你可以通过 Rancher UI 监控应用。
-
-## Windows 支持
-
-有关对具有 Windows 节点的集群的支持的详细信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/windows-support.md)。
-
-
-## GitHub 仓库
-
-你可以单击此处获取 [Fleet Helm Chart](https://github.com/rancher/fleet/releases/latest)。
-
-
-## 在代理后使用 Fleet
-
-有关在代理后使用 Fleet 的详细信息,请参阅[此页面](../../../integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md)。
-
-## Helm Chart 依赖
-
-由于用户需要完成依赖列表,因此为了成功部署具有依赖项的 Helm Chart,你必须手动运行命令(如下所列)。如果你不这样做,并继续克隆仓库并运行 `helm install`,由于依赖项将丢失,因此你的安装将失败。
-
-git 仓库中的 Helm Chart 必须在 Chart 子目录中包含其依赖项。你必须手动运行 `helm dependencies update $chart`,或在本地运行 `helm dependencies build $chart`,然后将完整的 Chart 目录提交到你的 git 仓库。请注意,你需要使用适当的参数来修改命令。
-
-## 故障排除
-
----
-* **已知问题**:Fleet git 仓库的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-安装-rancher-backup-operator) 创建的备份或恢复中。如果我们有了永久的解决方案,我们将通知社区。
-
-* **临时解决方法:**
- 默认情况下,用户定义的密文不会在 Fleet 中备份。如果执行灾难恢复或将 Rancher 迁移到新集群,则需要重新创建密文。要修改 resourceSet 以包含需要备份的其他资源,请参阅[此文档](https://github.com/rancher/backup-restore-operator#user-flow)。
-
----
-
-## 文档
-
-Fleet 文档链接:[https://fleet.rancher.io/](https://fleet.rancher.io/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
deleted file mode 100644
index 113a30ad48b..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
+++ /dev/null
@@ -1,175 +0,0 @@
----
-title: 多集群应用
----
-
-通常,大多数应用都部署在单个 Kubernetes 集群上,但有时你可能需要跨不同集群和/或项目部署同一应用的多个副本。在 Rancher 中,_多集群应用_ 指的是使用 Helm Chart 跨多个集群部署的应用。由于能够跨多个集群部署相同的应用,因此可以避免在每个集群上重复执行相同的应用配置操作而引入的人为错误。使用多集群应用,你可以通过自定义在所有项目/集群中使用相同的配置,并根据你的目标项目更改配置。由于多集群应用被视为单个应用,因此更容易管理和维护。
-
-全局应用商店中的任何 Helm Chart 都可用于部署和管理多集群应用。
-
-创建多集群应用后,你可以对全局 DNS 条目进行编程,以便更轻松地访问应用。
-
-## 先决条件
-
-### 权限
-
-要在 Rancher 中创建多集群应用,你至少需要具有以下权限之一:
-
-- 目标集群中的[项目成员角色](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色),能够创建、读取、更新和删除工作负载
-- 目标项目所在集群的[集群所有者角色](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)
-
-### 启用旧版功能
-
-由于 Rancher 2.5 已弃用多集群应用并使用 Fleet 取代它,你需要使用功能开关以启用多集群应用。
-
-1. 在左上角,单击 **☰ > 全局设置**。
-1. 单击**功能开关**。
-1. 转到 `Legacy` 功能开关并单击**激活**。
-
-## 启动多集群应用
-
-1. 在左上角,单击**☰ > 多集群应用**。
-1. 点击**启动**。
-1. 找到要启动的应用。
-1. (可选)查看来自 Helm Chart `README` 的详细描述。
-1. 在**配置选项**下输入多集群应用的**名称**。默认情况下,此名称还用于在每个[目标项目](#目标)中为多集群应用创建一个 Kubernetes 命名空间。命名空间命名为 `-`。
-1. 选择一个**模板版本**。
-1. 完成[多集群应用配置选项](#多集群应用配置选项)以及[应用配置选项](#应用配置选项)。
-1. 选择可以[与多集群应用交互](#成员)的**成员**。
-1. 添加[自定义应用配置答案](#覆盖特定项目的应用配置选项),这将更改默认应用配置答案中特定项目的配置。
-1. 查看**预览**中的文件。确认后,单击**启动**。
-
-**结果**:应用已部署到所选的命名空间。你可以从项目中查看应用状态。
-
-## 多集群应用配置选项
-
-Rancher 将多集群应用的配置选项分为以下几个部分。
-
-### 目标
-
-在**目标**部分中,选择用于部署应用的项目。项目列表仅显示你有权访问的项目。所选的每个项目都会被添加到列表中,其中显示了所选的集群名称和项目名称。要移除目标项目,单击 **-**。
-
-### 升级
-
-在**升级**部分中,选择升级应用时需要使用的升级策略。
-
-* **滚动更新(批量)**:选择此升级策略时,每次升级的应用数量取决于选择的**批量大小**和**间隔**(多少秒后才开始下一批更新)。
-
-* **同时升级所有应用**:选择此升级策略时,所有项目的所有应用都将同时升级。
-
-### 角色
-
-在**角色**中,你可以定义多集群应用的角色。通常,当用户[启动商店应用](../../../pages-for-subheaders/helm-charts-in-rancher.md)时,该用户的权限会用于创建应用所需的所有工作负载/资源。
-
-多集群应用由 _系统用户_ 部署,系统用户还被指定为所有底层资源的创建者。由于实际用户可以从某个目标项目中删除,因此使用 _系统用户_ 而不是实际用户。如果实际用户从其中一个项目中删除,则该用户将不再能够管理其他项目的应用。
-
-Rancher 允许你选择**项目**或**集群**的角色选项。Rancher 将允许你根据用户的权限使用其中一个角色进行创建。
-
-- **项目** - 相当于[项目成员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。如果你选择此角色,Rancher 将检查用户是否在所有目标项目中至少具有[项目成员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)的角色。虽然用户可能没有被明确授予 _项目成员_ 角色,但如果用户是[管理员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色),则认为该用户具有所需的权限级别。
-
-- **集群** - 相当于[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)。如果你选择此角色,Rancher 将检查用户是否在所有目标项目中至少具有[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)的角色。虽然用户可能没有被明确授予 _集群所有者_ 角色,但如果用户是[管理员](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md),则认为该用户具有所需的权限级别。
-
-在启动应用时,Rancher 会在启动应用之前确认你在目标项目中是否拥有这些权限。
-
-:::note
-
-某些应用(如 _Grafana_ 或 _Datadog_)需要访问特定集群级别的资源。这些应用将需要 _集群_ 角色。如果你之后发现应用需要集群角色,则可以升级多集群应用以更新角色。
-
-:::
-
-## 应用配置选项
-
-对于每个 Helm Chart,你需要输入一个必须的答案列表才能成功部署 Chart。由于 Rancher 会将答案作为 `--set` 标志传递给 Helm,因此你必须按照[使用 Helm:–set 的格式和限制](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set)中的语法规则来格式化这些答案。
-
-:::note 示例
-
-当输入的答案包含用逗号分隔的两个值(即 `abc, bcd`)时,你需要用双引号将值括起来(即 ``"abc, bcd" ``)。
-
-:::
-
-### 使用 questions.yml 文件
-
-如果你部署的 Helm Chart 包含 `questions.yml` 文件,Rancher UI 会将此文件转换成易于使用的 UI 来收集问题的答案。
-
-### 原生 Helm Chart 的键值对
-
-对于原生 Helm Chart(即来自 **Helm Stable** 或 **Helm Incubator** 应用商店或自定义 Helm Chart 仓库的 Chart),答案会在 **Answers** 中以键值对的形式提供。这些答案能覆盖默认值。
-
-### 成员
-
-默认情况下,多集群应用只能由应用的创建者管理。你可以在**成员**中添加其他用户,以便这些用户管理或查看多集群应用。
-
-1. 在**成员**搜索框中键入成员的名称,查找要添加的用户。
-
-2. 为该成员选择**访问类型**。多集群项目有三种访问类型,请仔细阅读并了解这些访问类型的含义,以了解多集群应用权限的启用方式。
-
- - **所有者**:此访问类型可以管理多集群应用的任何配置,包括模板版本、[多集群应用配置选项](#多集群应用配置选项),[应用配置选项](#应用配置选项),可以与多集群应用交互的成员,以及[自定义应用配置答案](#覆盖特定项目的应用配置选项)。由于多集群应用的创建使用与用户不同的权限集,因此多集群应用的任何 _所有者_ 都可以管理/删除[目标项目](#目标)中的应用,而不需要显式授权访问这些项目。请仅为受信任的用户配置此访问类型。
-
- - **成员**:此访问类型只能修改模板版本、[应用配置选项](#应用配置选项)和[自定义应用配置答案](#覆盖特定项目的应用配置选项)。由于多集群应用的创建使用与用户不同的权限集,因此多集群应用的任何 _成员_ 都可以修改应用,而不需要显式授权访问这些项目。请仅为受信任的用户配置此访问类型。
-
- - **只读**:此访问类型不能修改多集群应用的任何配置选项。用户只能查看这些应用。
-
- :::caution
-
- 请确保仅为受信任的用户授予 _所有者_ 或 _成员_ 访问权限,因为这些用户即使无法直接访问项目,也将自动能够管理为此多集群应用创建的应用。
-
- :::
-
-### 覆盖特定项目的应用配置选项
-
-多集群应用的主要优势之一,是能够在多个集群/项目中使用相同配置部署相同的应用。在某些情况下,你可能需要为某个特定项目使用稍微不同的配置选项,但你依然希望统一管理该应用与其他匹配的应用。此时,你可以为该项目覆盖特定的[应用配置选项](#应用配置选项),而不需要创建全新的应用。
-
-1. 在**答案覆盖**中,单击**添加覆盖**。
-
-2. 对于每个覆盖,你可以选择以下内容:
-
- - **范围**:在配置选项中选择要覆盖哪些目标项目的答案。
-
- - **问题**:选择要覆盖的问题。
-
- - **答案**:输入要使用的答案。
-
-## 升级多集群应用角色和项目
-
-- **在现有的多集群应用上更改角色**
- 多集群应用的创建者和任何具有“所有者”访问类型的用户都可以升级其**角色**。添加新角色时,我们会检查用户在所有当前目标项目中是否具有该角色。Rancher 会根据 `Roles` 字段的安装部分,相应地检查用户是否具有全局管理员、集群所有者或项目所有者的角色。
-
-- **添加/删除目标项目**
-1. 多集群应用的创建者和任何具有“所有者”访问类型的用户都添加或移除目标项目。添加新项目时,我们检查此请求的调用者是否具有多集群应用中定义的所有角色。Rancher 会检查用户是否具有全局管理员、集群所有者和项目所有者的角色。
-2. 删除目标项目时,我们不会进行这些成员资格检查。这是因为调用者的权限可能与目标项目有关,或者由于该项目已被删除导致调用者希望将该项目从目标列表中删除。
-
-
-## 多集群应用管理
-
-与同一类型的多个单独应用相比,使用多集群应用的好处之一是易于管理。你可以克隆、升级或回滚多集群应用。
-
-:::note 先决条件:
-
-`Legacy` 功能开关已启用。
-
-:::
-
-1. 在左上角,单击**☰ > 多集群应用**。
-
-2. 选择要对其执行操作的多集群应用,然后单击 **⋮**。选择以下选项之一:
-
- * **克隆**:创建另一个具有相同配置的多集群应用。通过使用此选项,你可以轻松复制多集群应用。
- * **升级**:升级多集群应用以更改某些配置。在为多集群应用执行升级时,如果你有合适的[访问类型](#成员),则可以修改[升级策略](#升级)。
- * **回滚**:将你的应用回滚到特定版本。如果你的一个或多个[目标](#目标)的多集群应用在升级后出现问题,你可以使用 Rancher 存储的多达 10 个多集群应用版本进行回滚。回滚多集群应用会恢复**所有**目标集群和项目的应用,而不仅仅是受升级问题影响的目标。
-
-## 删除多集群应用
-
-:::note 先决条件:
-
-`Legacy` 功能开关已启用。
-
-:::
-
-1. 在左上角,单击**☰ > 多集群应用**。
-
-2. 选择要删除的多集群应用,然后单击**⋮ > 删除**。删除多集群应用会删除所有目标项目中的所有应用和命名空间。
-
- :::note
-
- 不能独立删除在目标项目中为多集群应用创建的应用。只有删除多集群应用后才能删除这些应用。
-
- :::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
index 277961c5dae..a8a10335f6b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
@@ -6,7 +6,7 @@ title: 为高可用 K3s Kubernetes 集群设置基础设施
我们根据 Rancher 的安装位置(K3s Kubernetes 集群、RKE Kubernetes 集群或单个 Docker 容器)为专用于 Rancher 的 Kubernetes 集群推荐不同基础设施。
-有关每个安装选项的详情,请参见[本页](../../../pages-for-subheaders/installation-and-upgrade.md)。
+有关每个安装选项的详情,请参见[本页](../../../getting-started/installation-and-upgrade/installation-and-upgrade.md)。
:::note 重要提示:
@@ -21,13 +21,13 @@ title: 为高可用 K3s Kubernetes 集群设置基础设施
- **1 个负载均衡器**:用于将流量转发到这两个节点中。
- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
-### 2. 配置外部数据库
+## 2. 配置外部数据库
K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的数据库来运行 Kubernetes。该功能让 Kubernetes 运维更加灵活。你可以根据实际情况选择合适的数据库。
@@ -39,7 +39,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
如需获取配置 K3s 集群数据库的所有可用选项,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
-### 3. 配置负载均衡器
+## 3. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -62,7 +62,7 @@ K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的
:::
-### 4. 配置 DNS 记录
+## 4. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
index 24387c1f786..fb64922e7eb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
@@ -18,19 +18,19 @@ title: 为高可用 RKE Kubernetes 集群设置基础设施
这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
-### 为什么使用三个节点?
+## 为什么使用三个节点?
在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到三个节点中的任意一个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -53,7 +53,7 @@ title: 为高可用 RKE Kubernetes 集群设置基础设施
:::
-### 3. 配置 DNS 记录
+## 3. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
index 0af8f17b3bb..b474cf073db 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
@@ -18,13 +18,13 @@ title: 为高可用 RKE2 Kubernetes 集群设置基础设施
- **1 个负载均衡器**:用于将流量转发到这两个节点中。
- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
-### 1. 配置 Linux 节点
+## 1. 配置 Linux 节点
-请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)的常规要求。
如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
-### 2. 配置负载均衡器
+## 2. 配置负载均衡器
你还需要设置一个负载均衡器,来将流量重定向到所有节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
@@ -47,7 +47,7 @@ title: 为高可用 RKE2 Kubernetes 集群设置基础设施
:::
-### 4. 配置 DNS 记录
+## 4. 配置 DNS 记录
配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
index 21294b1016e..bb480d6fb63 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
@@ -2,7 +2,7 @@
title: 在 Amazon EC2 中配置节点
---
-在本教程中,你将学习一种为 Rancher Mangement Server 创建 Linux 节点的方法。这些节点将满足[操作系统、Docker、硬件和网络的要求](../../../pages-for-subheaders/installation-requirements.md)。
+在本教程中,你将学习一种为 Rancher Management Server 创建 Linux 节点的方法。这些节点将满足[操作系统、Docker、硬件和网络的要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。
如果 Rancher Server 安装在 RKE Kubernetes 集群上,你需要配置三个实例。
@@ -10,12 +10,12 @@ title: 在 Amazon EC2 中配置节点
如果 Rancher Server 安装在单个 Docker 容器中,你只需要配置一个实例。
-### 1. 准备工作(可选)
+## 1. 准备工作(可选)
-- **创建 IAM 角色**:要允许 Rancher 操作 AWS 资源,例如创建新存储或新节点,你需要将 Amazon 配置为云提供商。要在 EC2 上设置云提供商,你需要进行几个操作,其中包括为 Rancher Server 节点设置 IAM 角色。有关设置云提供商的详情,请参见[本页](../../../pages-for-subheaders/set-up-cloud-providers.md)。
-- **创建安全组**:我们建议为 Rancher 节点设置一个符合 [Rancher 节点端口要求](../../../pages-for-subheaders/installation-requirements.md#端口要求)的安全组。
+- **创建 IAM 角色**:要允许 Rancher 操作 AWS 资源,例如创建新存储或新节点,你需要将 Amazon 配置为云提供商。要在 EC2 上设置云提供商,你需要进行几个操作,其中包括为 Rancher Server 节点设置 IAM 角色。有关设置云提供商的详情,请参见[本页](../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。
+- **创建安全组**:我们建议为 Rancher 节点设置一个符合 [Rancher 节点端口要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#端口要求)的安全组。
-### 2. 配置实例
+## 2. 配置实例
1. 登录到 [Amazon AWS EC2 控制台](https://console.aws.amazon.com/ec2/)。由于 Rancher Management Server 的所有基础设施都需要位于同一区域,因此,请务必记下创建 EC2 实例(Linux 节点)的**区域**。
1. 在左侧面板中,点击**实例**。
@@ -26,7 +26,7 @@ title: 在 Amazon EC2 中配置节点
1. 在**实例数量**字段中,输入实例数量。创建高可用 K3s 集群仅需要两个实例,而高可用 RKE 集群则需要三个实例。
1. 可选:如果你为 Rancher 创建了一个 IAM 角色来操作 AWS 资源,请在 **IAM 角色**字段中选择新 IAM 角色。
1. 分别点击**下一步:添加存储**,**下一步:添加标签**和**下一步:配置安全组**。
-1. 在**步骤 6:配置安全组**中,选择一个符合 Rancher 节点[端口要求](../../../pages-for-subheaders/installation-requirements.md#端口要求)的安全组。
+1. 在**步骤 6:配置安全组**中,选择一个符合 Rancher 节点[端口要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#端口要求)的安全组。
1. 点击**查看并启动**。
1. 点击**启动**。
1. 选择一个新的或现有的密钥对,用于之后连接到你的实例。如果使用现有密钥对,请确保你有访问私钥的权限。
@@ -41,7 +41,7 @@ title: 在 Amazon EC2 中配置节点
:::
-### 3. 为 RKE Kubernetes 集群节点安装 Docker 并创建用户
+## 3. 为 RKE Kubernetes 集群节点安装 Docker 并创建用户
1. 在 [AWS EC2 控制台](https://console.aws.amazon.com/ec2/)中,点击左侧面板中的**实例**。
1. 转到你想要安装 Docker 的实例。选择实例,并点击**操作 > 连接**。
@@ -67,7 +67,7 @@ sudo usermod -aG docker ubuntu
**结果**:你已配置满足操作系统、Docker、硬件和网络要求的 Rancher Server 节点。
-### RKE Kubernetes 集群节点的后续步骤
+## RKE Kubernetes 集群节点的后续步骤
如需在新节点上安装 RKE 集群,请记住每个节点的 **IPv4 公共 IP** 和 **私有 IP**。创建节点后,此信息可以在每个节点的**描述**选项卡中找到。公共和私有 IP 将用于设置 RKE 集群配置文件 `rancher-cluster.yml` 中每个节点的 `address` 和 `internal_address`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
index 2f28274e31b..52841cefbcf 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
@@ -10,9 +10,9 @@ title: 高可用安装
Rancher Server 的数据存储在 etcd 中。etcd 数据库可以在所有三个节点上运行。为了选举出大多数 etcd 节点认同的 etcd 集群 leader,节点的数量需要是奇数。如果 etcd 数据库不能选出 leader,etcd 可能会失败。这时候就需要使用备份来还原集群。
-有关 Rancher 如何工作的详情(与安装方法无关),请参见[架构](../../../pages-for-subheaders/rancher-manager-architecture.md)。
+有关 Rancher 如何工作的详情(与安装方法无关),请参见[架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)。
-### 推荐架构
+## 推荐架构
- Rancher 的 DNS 应该解析为 4 层负载均衡器。
- 负载均衡器应该把 TCP/80 端口和 TCP/443 端口的流量转发到 Kubernetes 集群的全部 3 个节点上。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
index 397fe2aa0d0..0ad61709567 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
@@ -10,7 +10,7 @@ Rancher 可以运行在任何 Kubernetes 集群上,包括托管的 Kubernetes
:::
-如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../../../pages-for-subheaders/air-gapped-helm-cli-install.md)。
+如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
:::tip 单节点安装提示:
@@ -189,5 +189,5 @@ kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed
### 后续操作
-[安装 Rancher](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)
+[安装 Rancher](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
index 313585d60dd..7ca1256f7e4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
@@ -14,15 +14,15 @@ title: 生产就绪集群检查清单
如需获取推荐的所有最佳实践的完整列表,请参阅[最佳实践](../../../../reference-guides/best-practices/best-practices.md)部分。
-### 节点要求
+## 节点要求
- 确保你的节点满足所有[节点要求](../node-requirements-for-rancher-managed-clusters.md),包括端口要求。
-### 备份 etcd
+## 备份 etcd
* 启用 etcd 快照。验证是否正在创建快照,并执行灾难恢复方案,从而验证快照是否有效。etcd 是存储集群状态的位置,丢失 etcd 数据意味着丢失集群。因此,请确保为集群配置 etcd 的定期快照,并确保快照也是存储在外部(节点外)的。
-### 集群架构
+## 集群架构
* 节点应具有以下角色配置之一:
* `etcd`
@@ -37,16 +37,16 @@ title: 生产就绪集群检查清单
有关每个 Kubernetes 角色的节点数的详细信息,请参阅[推荐架构](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md)部分。
-### Logging 和 Monitoring
+## Logging 和 Monitoring
* 为 Kubernetes 组件(系统服务)配置告警/通知程序。
* 为集群分析和事后剖析配置 Logging。
-### 可靠性
+## 可靠性
* 在集群上执行负载测试,以验证硬件是否可以支持你的工作负载。
-### 网络
+## 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 (../set-up-cloud-providers/set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
index a0ec43f76c5..6473225d264 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
@@ -53,7 +53,7 @@ title: 推荐的集群架构
参考:
-* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance)
+* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.5/faq/#what-is-failure-tolerance)
* [为 Kubernetes 操作 etcd 集群的官方 Kubernetes 文档](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/)
### Worker 节点数
@@ -62,7 +62,7 @@ title: 推荐的集群架构
### 为什么 Rancher 集群和运行应用的集群的生产要求不同
-你可能已经注意到我们的 [Kubernetes 安装](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)说明并不符合我们对生产就绪集群的要求,这是因为 `worker` 角色没有专用节点。然而,你 Rancher 中的这个三节点集群是有效的,因为:
+你可能已经注意到我们的 [Kubernetes 安装](../../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)说明并不符合我们对生产就绪集群的要求,这是因为 `worker` 角色没有专用节点。然而,你 Rancher 中的这个三节点集群是有效的,因为:
* 它允许一个 `etcd` 节点故障。
* 它通过多个 `controlplane` 节点来维护 master 组件的多个实例。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
index 1f929eb52f9..cac53d51604 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
@@ -4,7 +4,7 @@ title: Kubernetes 中节点的角色
本节介绍 Kubernetes 中 etcd 节点、controlplane 节点和 worker 节点的角色,以及这些角色如何在集群中协同工作。
-此图适用于 [Rancher 通过 RKE 部署的 Kubernetes 集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md):
+此图适用于 [Rancher 通过 RKE 部署的 Kubernetes 集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md):

线条表示组件之间的通信。而颜色纯粹用于视觉辅助。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
index 44c89d5068d..f77d15a79c7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
@@ -13,7 +13,7 @@ Rancher 简化了集群的创建,允许你通过 Rancher UI 而不是更复杂
有关 Rancher 服务如何配置集群以及使用哪些工具配置集群的概念性概述,请参阅[架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)页面。
-### 按集群类型划分的集群管理功能
+## 按集群类型划分的集群管理功能
下表总结了每种集群类型的可用选项和设置:
@@ -45,11 +45,11 @@ import ClusterCapabilitiesTable from '../../../shared-files/\_cluster-capabiliti
Rancher 可以在亚马逊 EC2、DigitalOcean、Azure 或 vSphere 等基础设施提供商中动态调配节点,然后在这些节点上安装 Kubernetes。
-使用 Rancher,你可以基于[节点模板](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#node-templates)创建节点池。该模板定义了用于在云提供商中启动节点的参数。
+使用 Rancher,你可以基于[节点模板](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点模板)创建节点池。该模板定义了用于在云提供商中启动节点的参数。
使用基础设施提供商托管的节点的一个好处是,如果某个节点失去了与集群的连接,Rancher 可以自动替换掉它,从而保持预期的集群配置。
-可用于创建节点模板的云提供商是由 Rancher UI 中激活的[节点驱动程序](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#node-drivers)决定的。
+可用于创建节点模板的云提供商是由 Rancher UI 中激活的[节点驱动程序](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#主机驱动)决定的。
有关详细信息,请参阅[基础设施提供商托管的节点](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)部分。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md
index 3649e928567..bda5e415a7d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-vsphere.md
@@ -12,7 +12,7 @@ Kubernetes 不再在树内维护云提供商。vSphere 有一个树外云提供
它遵循官方 [vSphere 迁移文档](https://vsphere-csi-driver.sigs.k8s.io/features/vsphere_csi_migration.html)中提供的步骤,并提供在 Rancher 中执行的步骤。
-### Cloud-config 格式限制
+## Cloud-config 格式限制
由于 vSphere Cloud Storage Interface (CSI) 中的一个现有错误,使用以下 cloud-config 格式配置的现有卷将无法迁移。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
index 85bcf4b7b11..357b8135919 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md
@@ -6,7 +6,7 @@ title: Rancher 管理集群的节点要求
:::note
-如果 Rancher 安装在高可用的 Kubernetes 集群上,Rancher Server 的三节点集群和下游集群有不同的要求。有关 Rancher 的安装要求,请参考[安装文档](../../../pages-for-subheaders/installation-requirements.md)中的节点要求。
+如果 Rancher 安装在高可用的 Kubernetes 集群上,Rancher Server 的三节点集群和下游集群有不同的要求。有关 Rancher 的安装要求,请参考[安装文档](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)中的节点要求。
:::
@@ -43,7 +43,7 @@ SUSE Linux 可能有一个防火墙,默认情况下会阻止所有端口。在
### Flatcar Container Linux 节点
-使用 Flatcar Container Linux 节点[通过 Rancher 启动 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 时,需要在 [Cluster Config 文件](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)中使用如下配置:
+使用 Flatcar Container Linux 节点[通过 Rancher 启动 Kubernetes](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 时,需要在 [Cluster Config 文件](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)中使用如下配置:
@@ -88,13 +88,13 @@ rancher_kubernetes_engine_config:
systemctl enable docker.service
```
-使用[主机驱动](../../../pages-for-subheaders/about-provisioning-drivers.md#主机驱动)时会自动启用 Docker 服务。
+使用[主机驱动](../authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#主机驱动)时会自动启用 Docker 服务。
### Windows 节点
运行 Windows Server 节点必须使用 Docker 企业版。
-Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集群](../../../pages-for-subheaders/use-windows-clusters.md)。
+Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集群](../kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md)。
## 硬件要求
@@ -104,13 +104,13 @@ Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集
有关大型 Kubernetes 集群的硬件建议,请参阅[构建大型集群](https://kubernetes.io/docs/setup/best-practices/cluster-large/)的官方 Kubernetes 文档。
-有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.4.0/op-guide/hardware/)。
+有关生产环境中 etcd 集群的硬件建议,请参阅官方 [etcd 文档](https://etcd.io/docs/v3.5/op-guide/hardware/)。
## 网络要求
对于生产集群,我们建议你通过仅打开以下端口要求中定义的端口来限制流量。
-需要开放的端口根据下游集群的启动方式而有所不同。以下列出了需要为不同[集群创建选项](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)打开的端口。
+需要开放的端口根据下游集群的启动方式而有所不同。以下列出了需要为不同[集群创建选项](../kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)打开的端口。
有关 Kubernetes 集群中 etcd 节点、controlplane 节点和 Worker 节点的端口要求的详细信息,请参阅 [Rancher Kubernetes Engine 的端口要求](https://rancher.com/docs/rke/latest/en/os/#ports)。
@@ -126,4 +126,4 @@ Windows 节点只能用于 Worker 节点。请参阅[配置 Windows 自定义集
如果你要配置符合 CIS(互联网安全中心)Kubernetes 基准的 Kubernetes 集群,我们建议你在安装 Kubernetes 之前按照我们的强化指南来配置节点。
-有关强化指南的更多信息,以及了解哪个指南版本对应于你的 Rancher 和 Kubernetes 版本,请参阅[安全](../../../pages-for-subheaders/rancher-security.md#rancher-强化指南)。
+有关强化指南的更多信息,以及了解哪个指南版本对应于你的 Rancher 和 Kubernetes 版本,请参阅[安全](../../../reference-guides/rancher-security/rancher-security.md#rancher-加固指南)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
index e3857d23024..23d87903d6c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
@@ -13,7 +13,7 @@ Rancher 管理注册集群的范围取决于集群的类型。详情请参见[
已注册的 RKE Kubernetes 集群必须具有所有三个节点角色,分别是 etcd、controlplane 和 worker。只有 controlplane 组件的集群无法在 Rancher 中注册。
-有关 RKE 节点角色的更多信息,请参阅[最佳实践](../../../pages-for-subheaders/checklist-for-production-ready-clusters.md#集群架构)。
+有关 RKE 节点角色的更多信息,请参阅[最佳实践](./checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md#集群架构)。
### 权限
@@ -106,9 +106,9 @@ Rancher 管理注册集群的范围取决于集群的类型。
注册集群后,集群所有者可以:
- 通过 RBAC [管理集群访问](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)
-- 启用[Monitoring、告警和 Notifiers](../../../pages-for-subheaders/monitoring-and-alerting.md)
-- 启用 [Logging](../../../pages-for-subheaders/logging.md)
-- 启用 [Istio](../../../pages-for-subheaders/istio.md)
+- 启用[Monitoring、告警和 Notifiers](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)
+- 启用 [Logging](../../../integrations-in-rancher/logging/logging.md)
+- 启用 [Istio](../../../integrations-in-rancher/istio/istio.md)
- 管理项目和工作负载
### 已注册 RKE2 和 K3s 集群的附加功能
@@ -133,7 +133,7 @@ Rancher 处理注册的 EKS、AKS 或 GKE 集群的方式与处理在 Rancher
如果你在 Rancher 中创建 EKS、AKS 或 GKE 集群,然后将其删除,Rancher 会销毁该集群。通过 Rancher 删除已注册的集群时,Rancher Server 会_断开_与集群的连接。该集群仍然存在,只是它不再在 Rancher 中。你仍然可以像注册前一样访问已注销的集群。
-有关可用于管理已注册集群的功能,请参阅[按集群类型划分的集群管理功能](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)。
+有关可用于管理已注册集群的功能,请参阅[按集群类型划分的集群管理功能](../kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)。
## 配置 RKE2 和 K3s 集群升级
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
index 3c31569bf95..dc388d67294 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
@@ -21,7 +21,7 @@ weight: 1
:::
-### 1. 创建 IAM 角色并附加到实例
+## 1. 创建 IAM 角色并附加到实例
添加到集群的所有节点都必须能够与 EC2 交互,以便它们可以创建和删除资源。你可以使用附加到实例的 IAM 角色来启用交互。请参阅 [Amazon 文档:创建 IAM 角色](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) 来创建 IAM 角色。有两个示例策略:
@@ -30,7 +30,7 @@ weight: 1
在创建 [Amazon EC2 集群](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)时,你必须在创建**节点模板**时填写创建的 IAM 角色的 **IAM Instance Profile Name**(不是 ARN)。
-创建[自定义集群](../../../../pages-for-subheaders/use-existing-nodes.md)时,你必须手动将 IAM 角色附加到实例。
+创建[自定义集群](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md)时,你必须手动将 IAM 角色附加到实例。
具有 `controlplane` 角色的节点的 IAM 策略:
@@ -129,7 +129,7 @@ weight: 1
}
```
-### 2. 创建 ClusterID
+## 2. 创建 ClusterID
以下资源需要使用 `ClusterID` 进行标记:
@@ -155,6 +155,574 @@ weight: 1
**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`.
-### 使用 Amazon Elastic Container Registry (ECR)
+## 使用 Amazon Elastic Container Registry (ECR)
在将[创建 IAM 角色并附加到实例](#1-创建-iam-角色并附加到实例)中的 IAM 配置文件附加到实例时,kubelet 组件能够自动获取 ECR 凭证。使用低于 v1.15.0 的 Kubernetes 版本时,需要在集群中配置 Amazon 云提供商。从 Kubernetes 版本 v1.15.0 开始,kubelet 无需在集群中配置 Amazon 云提供商即可获取 ECR 凭证。
+
+### Using the Out-of-Tree AWS Cloud Provider
+
+
+
+
+1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for the cloud provider to find the instance correctly.
+
+2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object:
+
+```yaml
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ cloud-provider-name: aws
+```
+
+This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally:
+
+
+**Override on Etcd:**
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ kubelet-arg:
+ - cloud-provider=external
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/etcd-role
+ operator: In
+ values:
+ - 'true'
+```
+
+**Override on Control Plane:**
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ disable-cloud-controller: true
+ kube-apiserver-arg:
+ - cloud-provider=external
+ kube-controller-manager-arg:
+ - cloud-provider=external
+ kubelet-arg:
+ - cloud-provider=external
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/control-plane-role
+ operator: In
+ values:
+ - 'true'
+```
+
+**Override on Worker:**
+
+```yaml
+spec:
+ rkeConfig:
+ machineSelectorConfig:
+ - config:
+ kubelet-arg:
+ - cloud-provider=external
+ machineLabelSelector:
+ matchExpressions:
+ - key: rke.cattle.io/worker-role
+ operator: In
+ values:
+ - 'true'
+```
+
+3. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components.
+
+4. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install:
+
+```yaml
+spec:
+ rkeConfig:
+ additionalManifest: |-
+ apiVersion: helm.cattle.io/v1
+ kind: HelmChart
+ metadata:
+ name: aws-cloud-controller-manager
+ namespace: kube-system
+ spec:
+ chart: aws-cloud-controller-manager
+ repo: https://kubernetes.github.io/cloud-provider-aws
+ targetNamespace: kube-system
+ bootstrap: true
+ valuesContent: |-
+ hostNetworking: true
+ nodeSelector:
+ node-role.kubernetes.io/control-plane: "true"
+ args:
+ - --configure-cloud-routes=false
+ - --v=5
+ - --cloud-provider=aws
+```
+
+
+
+
+
+1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`.
+
+:::note
+
+If you use IP-based naming, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2.internal`). If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention.
+
+:::
+
+To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when the `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`:
+
+```yaml
+rancher_kubernetes_engine_config:
+ cloud_provider:
+ name: external-aws
+ useInstanceMetadataHostname: true
+```
+
+You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md), add [`--node-name`](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) to the `docker run` node registration command to set `hostname-override` — for example, `"$(hostname -f)"`. This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**.
+
+2. Select the cloud provider.
+
+Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and enables `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`.
+
+:::note
+
+You must disable `useInstanceMetadataHostname` when setting a custom node name for custom clusters via `node-name`.
+
+:::
+
+```yaml
+rancher_kubernetes_engine_config:
+ cloud_provider:
+ name: external-aws
+ useInstanceMetadataHostname: true/false
+```
+
+Existing clusters that use an **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name.
+
+3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done manually, or via [Helm charts in UI](#helm-chart-installation-from-ui).
+
+Refer to the official AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws).
+
+
+
+
+### Helm Chart Installation from CLI
+
+
+
+
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on GitHub.
+
+1. Add the Helm repository:
+
+```shell
+helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws
+helm repo update
+```
+
+2. Create a `values.yaml` file with the following contents to override the default `values.yaml`:
+
+```yaml
+# values.yaml
+hostNetworking: true
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/control-plane
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+args:
+ - --configure-cloud-routes=false
+ - --use-service-account-credentials=true
+ - --v=2
+ - --cloud-provider=aws
+clusterRoleRules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+```
+
+3. Install the Helm chart:
+
+```shell
+helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml
+```
+
+Verify that the Helm chart installed successfully:
+
+```shell
+helm status -n kube-system aws-cloud-controller-manager
+```
+
+4. (Optional) Verify that the cloud controller manager update succeeded:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+
+
+
+
+Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on GitHub.
+
+1. Add the Helm repository:
+
+```shell
+helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws
+helm repo update
+```
+
+2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`:
+
+```yaml
+# values.yaml
+hostNetworking: true
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/controlplane
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+args:
+ - --configure-cloud-routes=false
+ - --use-service-account-credentials=true
+ - --v=2
+ - --cloud-provider=aws
+clusterRoleRules:
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services/status
+ verbs:
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+```
+
+3. Install the Helm chart:
+
+```shell
+helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml
+```
+
+Verify that the Helm chart installed successfully:
+
+```shell
+helm status -n kube-system aws-cloud-controller-manager
+```
+
+4. If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`:
+
+```shell
+kubectl edit daemonset aws-cloud-controller-manager -n kube-system
+```
+
+5. (Optional) Verify that the cloud controller manager update succeeded:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+
+
+
+### Helm Chart Installation from UI
+
+
+
+
+1. Click **☰**, then select the name of the cluster from the left navigation.
+
+2. Select **Apps** > **Repositories**.
+
+3. Click the **Create** button.
+
+4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field.
+
+5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**.
+
+6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**.
+
+7. Add the following container arguments:
+
+```yaml
+ - '--use-service-account-credentials=true'
+ - '--configure-cloud-routes=false'
+```
+
+8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup.
+
+```yaml
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+```
+
+9. Rancher-provisioned RKE2 nodes are tainted `node-role.kubernetes.io/control-plane`. Update tolerations and the nodeSelector:
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/control-plane
+
+```
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+```
+
+:::note
+
+There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the Daemonset manually to set the `nodeSelector`:
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/control-plane: 'true'
+```
+
+:::
+
+10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6).
+
+
+
+
+
+1. Click **☰**, then select the name of the cluster from the left navigation.
+
+2. Select **Apps** > **Repositories**.
+
+3. Click the **Create** button.
+
+4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field.
+
+5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**.
+
+6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**.
+
+7. Add the following container arguments:
+
+```yaml
+ - '--use-service-account-credentials=true'
+ - '--configure-cloud-routes=false'
+```
+
+8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup:
+
+```yaml
+ - apiGroups:
+ - ''
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+```
+
+9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector:
+
+```yaml
+tolerations:
+ - effect: NoSchedule
+ key: node.cloudprovider.kubernetes.io/uninitialized
+ value: 'true'
+ - effect: NoSchedule
+ value: 'true'
+ key: node-role.kubernetes.io/controlplane
+
+```
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+```
+
+:::note
+
+There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then edit the Daemonset manually to set the `nodeSelector`:
+
+```yaml
+nodeSelector:
+ node-role.kubernetes.io/controlplane: 'true'
+```
+
+:::
+
+10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
index 065ff2deb84..cda5dbafed4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
@@ -8,7 +8,7 @@ Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外
本文遵循官方 [vSphere 迁移文档](https://vsphere-csi-driver.sigs.k8s.io/features/vsphere_csi_migration.html)中提供的步骤,并介绍了要在 Rancher 中执行的步骤。
-### Cloud-config 格式限制
+## Cloud-config 格式限制
由于 vSphere CSI 中的现有错误,使用以下 cloud-config 格式配置的现有卷将不会迁移。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md
index f6aa2871a8c..a50855d7895 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md
@@ -21,23 +21,23 @@ _cloud provider_ 是 Kubernetes 中的一个模块,它提供了一个用于管
* GCE (Google Compute Engine)
* vSphere
-### 设置 Amazon 云提供商
+## 设置 Amazon 云提供商
有关启用 Amazon 云提供商的详细信息,请参阅[此页面](amazon.md)。
-### 设置 Azure 云提供商
+## 设置 Azure 云提供商
有关启用 Azure 云提供商的详细信息,请参阅[此页面](azure.md)。
-### 设置 GCE 云提供商
+## 设置 GCE 云提供商
有关启用 Google Compute Engine 云提供商的详细信息,请参阅[此页面](google-compute-engine.md)。
-### 设置 vSphere 云提供商
+## 设置 vSphere 云提供商
有关启用 vSphere 云提供商的详细信息,请参阅[树内 vSphere 配置](configure-in-tree-vsphere.md) 和 [树外 vSphere 配置](configure-out-of-tree-vsphere.md)。
-### 设置自定义云提供商
+## 设置自定义云提供商
任何 Kubernetes Cloud Provider 都可以通过`自定义`云提供商进行配置。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md
index 4369666a95d..d3a15f44ea7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md
@@ -146,7 +146,7 @@ Rancher 可以通过以下两种方式之一连接到私有 AKS 集群。
AKS 配置者可以在 Rancher 和提供商之间同步 AKS 集群的状态。有关其工作原理的技术说明,请参阅[同步](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md)。
-有关配置刷新间隔的信息,请参阅[本节](../../../../pages-for-subheaders/gke-cluster-configuration.md#配置刷新间隔)。
+有关配置刷新间隔的信息,请参阅[本节](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md#配置刷新间隔)。
## 以编程方式创建 AKS 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md
index ee3f3397dc6..f0168f45262 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md
@@ -59,7 +59,7 @@ title: 创建 GKE 集群
1. 可选:使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 可选:将 Kubernetes [标签](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)或[注释](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)添加到集群。
1. 输入你的 Google 项目 ID 和 Google 云凭证。
-1. 完成表单的其余部分。如需帮助,请参阅 [GKE 集群配置参考](../../../../pages-for-subheaders/gke-cluster-configuration.md)。
+1. 完成表单的其余部分。如需帮助,请参阅 [GKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)。
1. 单击**创建**。
**结果**:你已成功部署 GKE 集群。
@@ -79,7 +79,7 @@ title: 创建 GKE 集群
## 配置参考
-有关在 Rancher 中配置 GKE 集群的详细信息,请参阅[此页面](../../../../pages-for-subheaders/gke-cluster-configuration.md)。
+有关在 Rancher 中配置 GKE 集群的详细信息,请参阅[此页面](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)。
## 更新 Kubernetes 版本
集群的 Kubernetes 版本可以升级到 GKE 集群所在区域或地区中可用的任何版本。升级 Kubernetes 主版本不会自动升级 Worker 节点。节点可以独立升级。
@@ -94,7 +94,7 @@ GKE 在 1.19+ 中取消了基本身份验证。要将集群升级到 1.19+,必
GKE 配置者可以在 Rancher 和提供商之间同步 GKE 集群的状态。有关其工作原理的技术说明,请参阅[同步](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md)。
-有关配置刷新间隔的信息,请参阅[本节](../../../../pages-for-subheaders/gke-cluster-configuration.md#配置刷新间隔)。
+有关配置刷新间隔的信息,请参阅[本节](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md#配置刷新间隔)。
## 以编程方式创建 GKE 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md
index 3a5ac26045c..bfe8fd56a59 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/network-requirements-for-host-gateway.md
@@ -4,7 +4,7 @@ title: Host Gateway (L2bridge) 的网络要求
本节介绍如何配置使用 *Host Gateway (L2bridge)* 模式的自定义 Windows 集群。
-### 禁用私有 IP 地址检查
+## 禁用私有 IP 地址检查
如果你使用 *Host Gateway (L2bridge)* 模式,并将节点托管在下面列出的云服务上,则必须在启动时禁用 Linux 或 Windows 主机的私有 IP 地址检查。要为每个节点禁用此检查,请按照以下各个云服务对应的说明进行操作:
@@ -14,7 +14,7 @@ title: Host Gateway (L2bridge) 的网络要求
| Google GCE | [为实例启用 IP 转发](https://cloud.google.com/vpc/docs/using-routes#canipforward)(默认情况下,VM 无法转发由另一个 VM 发起的数据包) |
| Azure VM | [启用或禁用 IP 转发](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) |
-### 云托管虚拟机的路由配置
+## 云托管虚拟机的路由配置
如果是使用 Flannel 的 [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) 后端,同一节点上的所有容器都属于私有子网,流量通过主机网络从一个节点上的子网路由到在另一个节点上的子网。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md
index 09d36c82e7c..05d121dafd6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md
@@ -21,7 +21,7 @@ Windows 集群的其他要求如下:
有关支持 Windows 的 Kubernetes 功能摘要,请参阅[在 Windows 中使用 Kubernetes 支持的功能和限制](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) 的 Kubernetes 文档,或[在 Kubernetes 中调度 Windows 容器的指南](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/)。
-### RKE2 Windows
+## RKE2 Windows
RKE2 配置功能还包括在 Windows 集群上安装 RKE2。RKE2 的 Windows 功能包括:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md
index 1c4c314708d..ccef1846703 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md
@@ -11,7 +11,7 @@ description: 了解如何添加 SSL 证书或 TLS 证书
:::
-### 1. 创建一个密文
+## 1. 创建一个密文
1. 在左上角,单击 **☰ > 集群管理**。
@@ -23,7 +23,7 @@ description: 了解如何添加 SSL 证书或 TLS 证书
1. 在**证书**字段中,将你的证书复制并粘贴到文本框中(包括标头和页脚),或者单击**从文件读取**选择文件系统上的证书文件。如果可能,我们建议使用**从文件读取**以减少出错的可能性。请注意,证书文件的扩展名是 `.crt`。
1. 单击**创建**。
-### 2. 将密文添加到 Ingress
+## 2. 将密文添加到 Ingress
1. 在左上角,单击 **☰ > 集群管理**。
1. 转到要部署 Ingress 的集群,然后单击**服务发现 > Ingress**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md
index 4dd86bb2a6c..8b80ea3272d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md
@@ -30,10 +30,10 @@ title: Kubernetes 资源
Rancher 支持两种类型的负载均衡器:
-- [Layer-4 负载均衡器](load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer#四层负载均衡器)
-- [Layer-7 负载均衡器](load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#七层负载均衡器)
+- [Layer-4 负载均衡器](./load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#四层负载均衡器)
+- [Layer-7 负载均衡器](./load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#七层负载均衡器)
-有关详细信息,请参阅[负载均衡器](load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md)。
+有关详细信息,请参阅[负载均衡器](./load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md)。
#### Ingress
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md
index 6fecebc7341..53bde4b952d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md
@@ -17,7 +17,7 @@ description: 阅读此步骤指南以部署工作负载。部署工作负载以
1. 选择现有命名空间,或单击**添加到新命名空间**并输入新命名空间。
-1. 点击**添加端口**进入端口映射,这让你可以访问集群内外的应用程序。如需更多信息,请参阅 [Service](../../../../pages-for-subheaders/workloads-and-pods.md#services)。
+1. 点击**添加端口**进入端口映射,这让你可以访问集群内外的应用程序。如需更多信息,请参阅 [Service](./workloads-and-pods.md#services)。
1. 配置其余选项:
@@ -41,7 +41,7 @@ description: 阅读此步骤指南以部署工作负载。部署工作负载以
- 在 [AWS](https://aws.amazon.com/) 中,节点必须位于同一可用区中并具有附加/分离卷的 IAM 权限。
- - 集群必须使用 [AWS 云提供商](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws)选项。有关启用此选项的更多信息,请参阅[创建 AWS EC2 集群](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)或[创建自定义集群](../../../../pages-for-subheaders/use-existing-nodes.md)。
+ - 集群必须使用 [AWS 云提供商](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws)选项。有关启用此选项的更多信息,请参阅[创建 AWS EC2 集群](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)或[创建自定义集群](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md
index 08be0859bb8..35e16f2345a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md
@@ -9,16 +9,16 @@ description: "了解在 Kubernetes 中构建复杂容器化应用程序的两种
你可以使用两种基本结构(pod 和工作负载)在 Kubernetes 中构建复杂的容器化应用程序。构建应用程序后,你可以使用第三种结构(service)在集群中或互联网上公开应用程序。
-### Pod
+## Pod
[_Pod_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) 是一个或多个共享网络命名空间和存储卷的容器。大多数 pod 只有一个容器。因此,我们讨论的 _pod_ 通常等同于 _容器_。扩展 pod 的方式与扩展容器的方式相同,即配置实现服务的同一 pod 的多个实例。通常,Pod 会根据工作负载进行扩展和管理。
-### 工作负载
+## 工作负载
_工作负载_ 是为 pod 设置部署规则的对象。Kubernetes 基于这些规则执行部署,并根据应用程序的当前状态来更新工作负载。
工作负载让你可以定义应用程序调度、扩展和升级的规则。
-#### 工作负载类型
+### 工作负载类型
Kubernetes 将工作负载分为不同的类型。Kubernetes 支持的最流行的类型是:
@@ -42,7 +42,7 @@ Kubernetes 将工作负载分为不同的类型。Kubernetes 支持的最流行
_CronJobs_ 与 Job 类似。但是,CronJob 会基于 cron 的计划运行到完成状态。
-### Services
+## Services
在许多用例中,工作负载必须:
@@ -51,7 +51,7 @@ Kubernetes 将工作负载分为不同的类型。Kubernetes 支持的最流行
你可以通过创建一个 _Service_ 实现这些目的。Service 使用[选择器/标签(查看代码示例)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller)来映射到底层工作负载的 pod。Rancher UI 使用你选择的服务端口和类型来自动创建 service 以及工作负载,从而简化此映射过程。
-#### Service 类型
+### Service 类型
Rancher 中有几种可用的 Service 类型。以下描述来自 [Kubernetes 文档](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)。
@@ -77,6 +77,6 @@ Rancher 中有几种可用的 Service 类型。以下描述来自 [Kubernetes
## 相关链接
-### 外部链接
+## 外部链接
- [Service](https://kubernetes.io/docs/concepts/services-networking/service/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md
index 15263fff292..e01f1b6fc24 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/about-rancher-agents.md
@@ -7,17 +7,17 @@ Rancher 管理的集群上部署了两种不同的 Agent 资源:
- [cattle-cluster-agent](#cattle-cluster-agent)
- [cattle-node-agent](#cattle-node-agent)
-有关 Rancher Server 如何配置集群并与集群通信的概述,请参阅[产品架构](../../../pages-for-subheaders/rancher-manager-architecture.md)。
+有关 Rancher Server 如何配置集群并与集群通信的概述,请参阅[产品架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)。
-### cattle-cluster-agent
+## cattle-cluster-agent
-`cattle-cluster-agent` 用于连接 [Rancher 启动的 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。`cattle-cluster-agent` 使用 Deployment 资源进行部署。
+`cattle-cluster-agent` 用于连接 [Rancher 启动的 Kubernetes](./launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。`cattle-cluster-agent` 使用 Deployment 资源进行部署。
-### cattle-node-agent
+## cattle-node-agent
-`cattle-node-agent` 用于在执行集群操作时与 [Rancher 启动的 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群中的节点进行交互。集群操作包括升级 Kubernetes 版本和创建/恢复 etcd 快照。`cattle-node-agent` 使用 DaemonSet 资源进行部署,以确保能在每个节点上运行。当 `cattle-cluster-agent` 不可用时,`cattle-node-agent` 可以作为备选方案,用来连接 [Rancher 启动的 Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。
+`cattle-node-agent` 用于在执行集群操作时与 [Rancher 启动的 Kubernetes](./launch-kubernetes-with-rancher.md) 集群中的节点进行交互。集群操作包括升级 Kubernetes 版本和创建/恢复 etcd 快照。`cattle-node-agent` 使用 DaemonSet 资源进行部署,以确保能在每个节点上运行。当 `cattle-cluster-agent` 不可用时,`cattle-node-agent` 可以作为备选方案,用来连接 [Rancher 启动的 Kubernetes](./launch-kubernetes-with-rancher.md) 集群的 Kubernetes API。
-### 调度规则
+## 调度规则
`cattle-cluster-agent` 使用一组固定的容忍度,或基于应用于 control plane 节点的污点动态添加的容忍度。这种结构允许[基于污点进行驱逐](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions)为 `cattle-cluster-agent` 正常工作。
@@ -28,7 +28,7 @@ Rancher 管理的集群上部署了两种不同的 Agent 资源:
| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | **注意**:这些是默认容忍度,并将替换为与 controlplane 节点的污点匹配的容忍度。 `effect:NoSchedule` `key:node-role.kubernetes.io/controlplane` `value:true` `effect:NoSchedule` `key:node-role.kubernetes.io/control-plane` `operator:Exists` `effect:NoSchedule` `key:node-role.kubernetes.io/master` `operator:Exists` |
| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` |
-`cattle-cluster-agent` Deployment 使用 `preferredDuringSchedulingIgnoredDuringExecution` 的首选调度规则,倾向于在具有 `controlplane` 节点的节点上进行调度。当集群中没有可见的 controlplane 节点时(通常是使用[提供商托管的 Kubernetes 的集群](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)),你可以在节点上添加 `cattle.io/cluster-agent=true` 标签,从而优先将 `cattle-cluster-agent` pod 调度到该节点。
+`cattle-cluster-agent` Deployment 使用 `preferredDuringSchedulingIgnoredDuringExecution` 的首选调度规则,倾向于在具有 `controlplane` 节点的节点上进行调度。当集群中没有可见的 controlplane 节点时(通常是使用[提供商托管的 Kubernetes 的集群](../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)),你可以在节点上添加 `cattle.io/cluster-agent=true` 标签,从而优先将 `cattle-cluster-agent` pod 调度到该节点。
有关调度规则的更多信息,请参阅 [Kubernetes:将 Pod 分配给节点](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md
index 3941f3b37c3..8c0e9478de5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md
@@ -6,11 +6,11 @@ RKE2,也称为 RKE Government,是一个完全符合标准的 Kubernetes 发
RKE1 和 RKE2 有一些细微的差异,本文将重点介绍这些差异。
-### controlplane 组件
+## controlplane 组件
RKE1 使用 Docker 来部署和管理 controlplane 组件,还使用 Docker 作为 Kubernetes 的容器运行时。相比之下,RKE2 将 controlplane 组件作为由 kubelet 管理的静态 pod 启动。RKE2 的容器运行时是 Containerd,它允许 Mirror 容器镜像仓库等内容。使用 Docker 的 RKE1 不允许 Mirror。
-### Cluster API
+## Cluster API
RKE2/K3s 配置是基于 Cluster API (CAPI) 上游框架之上构建的,这导致 RKE2 配置的集群的行为通常与 RKE1 配置的集群不同。
@@ -22,7 +22,7 @@ RKE2/K3s 配置是基于 Cluster API (CAPI) 上游框架之上构建的,这导
如果你是习惯于 RKE1 配置的用户,请注意新的 RKE2 行为。
-### 名词解释
+## 名词解释
从 RKE1 到 RKE2,某些术语已更改或已不再使用。例如,在 RKE1中,你使用**节点模板**,而在 RKE2 中,你可以在创建或编辑集群时配置集群节点池。另一个例子是 RKE1 中的**节点池(node pool)** 现在在 RKE2 中称为**主机池(machine pool)**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md
index 39f8b418151..656d9bd74a6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md
@@ -28,7 +28,7 @@ title: 创建 DigitalOcean 集群
### 2. 使用云凭证创建节点模板
-为 DigitalOcean 创建[节点模板](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板) 会允许 Rancher 在 DigitalOcean 中配置新节点。其他集群可以复用节点模板。
+为 DigitalOcean 创建[节点模板](./use-new-nodes-in-an-infra-provider.md#节点模板) 会允许 Rancher 在 DigitalOcean 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -42,7 +42,7 @@ title: 创建 DigitalOcean 集群
1. 在**集群**页面上,单击**创建**。
1. 单击 **DigitalOcean**。
1. 输入**集群名称**。
-1. 将一个或多个节点池添加到你的集群。将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 在**集群配置**中,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 单击**创建**。
@@ -71,7 +71,7 @@ title: 创建 DigitalOcean 集群
1. 单击 **DigitalOcean**。
1. 选择一个**云凭证**。如果存在多个则需要选择。否则,它是预选的。
1. 输入**集群名称**。
-1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
+1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](./use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
1. 为每个主机池定义主机配置。有关配置选项的信息,请参阅 [DigitalOcean 主机配置参考](../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md)。
1. 使用**集群配置**,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。有关配置集群的帮助,请参阅 [RKE2 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md
index b2918cf475a..c7fe813394f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md
@@ -9,12 +9,12 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
然后,在 Rancher 中创建一个 EC2 集群,并在配置新集群时为集群定义节点池。每个节点池都有一个 etcd、controlplane 或 worker 的 Kubernetes 角色。Rancher 会在新节点上安装 RKE Kubernetes,并为每个节点设置节点池定义的 Kubernetes 角色。
-### 先决条件
+## 先决条件
- **AWS EC2 访问密钥和密文密钥**,用于创建实例。请参阅 [Amazon 文档:创建访问密钥](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey)来创建访问密钥和密文密钥。
- **已创建 IAM 策略**,用于为用户添加的访问密钥和密文密钥。请参阅 [Amazon 文档:创建 IAM 策略(控制台)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start)来创建 IAM 策略。参阅下面的三个示例 JSON 策略:
- [IAM 策略示例](#iam-策略示例)
- - [带有 PassRole 的 IAM 策略示例](#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
+ - [带有 PassRole 的 IAM 策略示例](#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
- [允许加密 EBS 卷的 IAM 策略示例](#允许加密-ebs-卷的-iam-策略示例)
- 为用户添加 **IAM 策略权限**。请参阅 [Amazon 文档:为用户添加权限(控制台)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console),来将权限添加给用户。
@@ -44,7 +44,7 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
### 2. 使用云凭证和 EC2 的信息来创建节点模板
-为 EC2 创建[节点模板](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 EC2 中配置新节点。其他集群可以复用节点模板。
+为 EC2 创建[节点模板](./use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 EC2 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -60,14 +60,14 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
### 3. 使用节点模板创建具有节点池的集群
-将一个或多个节点池添加到你的集群。有关节点池的更多信息,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+将一个或多个节点池添加到你的集群。有关节点池的更多信息,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 点击 **☰ > 集群管理**。
1. 在**集群**页面上,单击**创建**。
1. 单击 **Amazon EC2**。
-1. 为每个 Kubernetes 角色创建一个节点池。为每个节点池选择你已创建的节点模板。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+1. 为每个 Kubernetes 角色创建一个节点池。为每个节点池选择你已创建的节点模板。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
-1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。参见[选择云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md)来配置 Kubernetes 云提供商。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
+1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。参见[选择云提供商](../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)来配置 Kubernetes 云提供商。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
:::note
@@ -103,7 +103,7 @@ description: 了解使用 Rancher 创建 Amazon EC2 集群所需的先决条件
1. 单击 **Amazon EC2**。
1. 选择一个**云凭证**。如果存在多个则需要选择。否则,它是预选的。
1. 输入**集群名称**。
-1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
+1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](./use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
1. 为每个主机池定义主机配置。有关配置选项的信息,请参阅 [EC2 主机配置参考](../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md)。
1. 使用**集群配置**,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。有关配置集群的帮助,请参阅 [RKE2 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md
index a3af5bca578..e9e2c6fbfb8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md
@@ -65,7 +65,7 @@ az ad sp create-for-rbac \
### 2. 使用云凭证创建节点模板
-为 Azure 创建[节点模板](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Azure 中配置新节点。其他集群可以复用节点模板。
+为 Azure 创建[节点模板](./use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Azure 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -81,7 +81,7 @@ az ad sp create-for-rbac \
1. 在**集群**页面上,单击**创建**。
1. 单击 **Azure**。
1. 输入**集群名称**。
-1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池及其最佳实践的更多信息,请参阅[本节](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池及其最佳实践的更多信息,请参阅[本节](./use-new-nodes-in-an-infra-provider.md)。
1. 在**集群配置**中,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 单击**创建**。
@@ -112,7 +112,7 @@ az ad sp create-for-rbac \
1. 单击 **Azure**。
1. 选择一个**云凭证**。如果存在多个则需要选择。否则,它是预选的。
1. 输入**集群名称**。
-1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
+1. 为每个 Kubernetes 角色创建一个主机池。请参阅[最佳实践](./use-new-nodes-in-an-infra-provider.md#节点角色)了解角色分配和计数的建议。
1. 为每个主机池定义主机配置。有关配置选项的信息,请参阅 [Azure 主机配置参考](../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md)。
1. 使用**集群配置**,选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。有关配置集群的帮助,请参阅 [RKE2 集群配置参考](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md
index 885bbb38847..83d63e83a38 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/nutanix.md
@@ -13,9 +13,9 @@ Rancher 可以在 AOS (AHV) 中配置节点并在其上安装 Kubernetes。在 A
Nutanix 集群可能由多组具有不同属性(例如内存或 vCPU 数量)的 VM 组成。这种分组允许对每个 Kubernetes 角色的节点大小进行细粒度控制。
-- [创建 Nutanix 集群](provision-kubernetes-clusters-in-aos.md#creating-a-nutanix-aos-cluster)
-- [配置存储](provision-kubernetes-clusters-in-aos.md)
+- [创建 Nutanix 集群](./provision-kubernetes-clusters-in-aos.md#创建-nutanix-aos-集群)
+- [配置存储](./provision-kubernetes-clusters-in-aos.md)
## 创建 Nutanix 集群
-在[本节](provision-kubernetes-clusters-in-aos.md)中,你将学习如何使用 Rancher 在 Nutanix AOS 中安装 [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes 集群。
+在[本节](./provision-kubernetes-clusters-in-aos.md)中,你将学习如何使用 Rancher 在 Nutanix AOS 中安装 [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes 集群。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md
index c360364aa91..f9904bede81 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md
@@ -51,7 +51,7 @@ title: 在 Nutanix AOS 中配置 Kubernetes 集群
### 1. 创建节点模板
-为 Nutanix AOS 创建[节点模板](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Nutanix AOS 中配置新节点。其他集群可以复用节点模板。
+为 Nutanix AOS 创建[节点模板](../use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 Nutanix AOS 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -71,7 +71,7 @@ title: 在 Nutanix AOS 中配置 Kubernetes 集群
1. 输入**集群名称**,然后点击**继续**。
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
-1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../use-new-nodes-in-an-infra-provider.md#节点池)。
1. 检查并确认你的选项。然后单击**创建**。
**结果**:集群已创建,并处于 **Provisioning** 状态。Rancher 已在你的集群中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md
index 806090bff7c..6defefed5a6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md
@@ -65,7 +65,7 @@ title: 在 vSphere 中配置 Kubernetes 集群
### 2. 使用云凭证创建节点模板
-为 vSphere 创建[节点模板](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 vSphere 中配置新节点。其他集群可以复用节点模板。
+为 vSphere 创建[节点模板](../use-new-nodes-in-an-infra-provider.md#节点模板)会允许 Rancher 在 vSphere 中配置新节点。其他集群可以复用节点模板。
1. 点击 **☰ > 集群管理**。
1. 单击 **RKE1 配置 > 节点模板**。
@@ -86,7 +86,7 @@ title: 在 vSphere 中配置 Kubernetes 集群
1. 使用**成员角色**为集群配置用户授权。点击**添加成员**添加可以访问集群的用户。使用**角色**下拉菜单为每个用户设置权限。
1. 使用**集群选项**选择要安装的 Kubernetes 版本、要使用的网络提供商,以及是否启用项目网络隔离。要查看更多集群选项,请单击**显示高级选项**。如需获取配置集群的帮助,请参阅 [RKE 集群配置参考](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
1. 如果你想稍后动态配置持久存储或其他基础设施,你需要修改集群 YAML 文件来启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
-1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)。
+1. 将一个或多个节点池添加到你的集群。每个节点池都使用节点模板来配置新节点。有关节点池的更多信息,包括为节点分配 Kubernetes 角色的最佳实践,请参阅[本节](../use-new-nodes-in-an-infra-provider.md#节点池)。
1. 检查并确认你的选项。然后单击**创建**。
**结果**:
@@ -107,4 +107,4 @@ title: 在 vSphere 中配置 Kubernetes 集群
- **通过 kubectl CLI 访问你的集群**:按照[这些步骤](../../../../new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#在工作站使用-kubectl-访问集群)在你的工作站上使用 kubectl 访问集群。在这种情况下,你将通过 Rancher Server 的身份验证代理进行身份验证,然后 Rancher 会让你连接到下游集群。此方法允许你在没有 Rancher UI 的情况下管理集群。
- **通过 kubectl CLI 使用授权的集群端点访问你的集群**:按照[这些步骤](../../../../new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)直接使用 kubectl 访问集群,而无需通过 Rancher 进行身份验证。我们建议设置此替代方法来访问集群,以便在无法连接到 Rancher 时访问集群。
-- **配置存储**:有关如何使用 Rancher 在 vSphere 中配置存储的示例,请参阅[本节](../../../../../pages-for-subheaders/provisioning-storage-examples.md)。要在 vSphere 中动态配置存储,你必须启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
+- **配置存储**:有关如何使用 Rancher 在 vSphere 中配置存储的示例,请参阅[本节](../../../manage-clusters/provisioning-storage-examples/provisioning-storage-examples.md)。要在 vSphere 中动态配置存储,你必须启用 vSphere 云提供商。有关更多信息,请参阅[树内 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)和[树外 vSphere cloud provider 文档](../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md
index aef1f3c08b4..88944e339fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md
@@ -31,7 +31,7 @@ title: 将用户添加到集群
如果配置了外部身份验证:
- - 在你键入时,Rancher 会从你的[外部身份验证](../../../../pages-for-subheaders/authentication-config.md)源返回用户。
+ - 在你键入时,Rancher 会从你的[外部身份验证](../../authentication-permissions-and-global-configuration/authentication-config/authentication-config.md)源返回用户。
:::note 使用 AD 但找不到你的用户?
@@ -43,7 +43,7 @@ title: 将用户添加到集群
:::note
- 如果你以本地用户身份登录,外部用户不会显示在你的搜索结果中。有关详细信息,请参阅[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+ 如果你以本地用户身份登录,外部用户不会显示在你的搜索结果中。有关详细信息,请参阅[外部身份验证配置和主体用户](../../authentication-permissions-and-global-configuration/authentication-config/authentication-config.md#外部认证配置和用户主体)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
index 364a4012eee..587d3b1fc3b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md
@@ -4,7 +4,7 @@ title: 授权集群端点的工作原理
本文介绍 kubectl CLI、kubeconfig 文件和授权集群端点如何协同工作,使你可以直接访问下游 Kubernetes 集群,而无需通过 Rancher Server 进行身份验证。本文旨在为[设置 kubectl 以直接访问集群的说明](use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)提供背景信息和上下文。
-### Kubeconfig 文件说明
+## Kubeconfig 文件说明
kubeconfig 文件是与 kubectl 命令行工具(或其他客户端)结合使用时用于配置 Kubernetes 访问的文件。
@@ -19,11 +19,11 @@ kubeconfig 文件及其内容特定于各个集群。你可以从 Rancher 的**
下载 kubeconfig 文件后,你将能够使用 kubeconfig 文件及其 Kubernetes [上下文](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration)访问下游集群。
-如果管理员[关闭了 kubeconfig 令牌生成](../../../../reference-guides/about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](./authorized-cluster-endpoint.md) 存在于你的 PATH 中。
+如果管理员[关闭了 kubeconfig 令牌生成](../../../../api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),则 kubeconfig 文件要求 [Rancher CLI](../../../../reference-guides/cli-with-rancher/rancher-cli.md) 存在于你的 PATH 中。
-### RKE 集群的两种身份验证方法
+## RKE 集群的两种身份验证方法
-如果集群不是 [RKE 集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md),kubeconfig 文件只允许你以一种方式访问集群,即通过 Rancher Server 进行身份验证,然后 Rancher 允许你在集群上运行 kubectl 命令。
+如果集群不是 [RKE 集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md),kubeconfig 文件只允许你以一种方式访问集群,即通过 Rancher Server 进行身份验证,然后 Rancher 允许你在集群上运行 kubectl 命令。
对于 RKE 集群,kubeconfig 文件允许你通过两种方式进行身份验证:
@@ -36,7 +36,7 @@ kubeconfig 文件及其内容特定于各个集群。你可以从 Rancher 的**
[架构介绍](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md)也详细解释了这些与下游 Kubernetes 集群通信的方法,并介绍了 Rancher 的工作原理以及 Rancher 如何与下游集群通信的详细信息。
-### 关于 kube-api-auth 身份验证 Webhook
+## 关于 kube-api-auth 身份验证 Webhook
`kube-api-auth` 微服务是为[授权集群端点](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点)提供用户认证功能而部署的。当你使用 `kubectl` 访问下游集群时,集群的 Kubernetes API server 会使用 `kube-api-auth` 服务作为 webhook 对你进行身份验证。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md
index ac6a4a338fb..0cc365ead17 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md
@@ -7,7 +7,7 @@ description: "了解如何通过 kubectl Shell 使用 kubectl,或通过 kubect
有关使用 kubectl 的更多信息,请参阅 [Kubernetes 文档:kubectl 概述](https://kubernetes.io/docs/reference/kubectl/overview/)。
-### 在 Rancher UI 中使用 kubectl shell 访问集群
+## 在 Rancher UI 中使用 kubectl shell 访问集群
你可以通过登录 Rancher 并在 UI 中打开 kubectl shell 来访问和管理你的集群。你无需进一步配置。
@@ -15,7 +15,7 @@ description: "了解如何通过 kubectl Shell 使用 kubectl,或通过 kubect
1. 转到要使用 kubectl 访问的集群,然后单击 **Explore**。
1. 在顶部导航菜单中,单击 **Kubectl Shell** 按钮。使用打开的窗口与你的 Kubernetes 集群进行交互。
-### 在工作站使用 kubectl 访问集群
+## 在工作站使用 kubectl 访问集群
本节介绍如何下载集群的 kubeconfig 文件、从工作站启动 kubectl 以及访问下游集群。
@@ -37,14 +37,13 @@ kubectl --kubeconfig /custom/path/kube.config get pods
```
1. 从工作站启动 kubectl。使用它与 Kubernetes 集群进行交互。
-
-### 使用 kubectl 创建的资源的注意事项
+## 使用 kubectl 创建的资源的注意事项
Rancher 会发现并显示由 `kubectl` 创建的资源。但是在发现资源的时候,这些资源可能没有包括所有必须的注释。如果资源已经使用 Rancher UI/API 进行操作(例如,扩展工作负载),但是由于缺少注释,资源的重新创建可能会触发。只有在首次对发现的资源进行操作时,这种情况才会发生。
## 直接使用下游集群进行身份验证
-本节旨在帮助你设置访问 [RKE 集群的替代方法](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+本节旨在帮助你设置访问 [RKE 集群的替代方法](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
此方法仅适用于启用了[授权集群端点](../../../../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点)的 RKE、RKE2 和 K3s集群。在 Rancher 创建集群时,Rancher 会生成一个 kubeconfig 文件,其中包含用于访问集群的额外 kubectl 上下文。该上下文允许你使用 kubectl 通过下游集群进行身份验证,而无需通过 Rancher 进行身份验证。有关授权集群端点如何工作的详细说明,请参阅[此页面](authorized-cluster-endpoint.md)。
@@ -52,7 +51,6 @@ Rancher 会发现并显示由 `kubectl` 创建的资源。但是在发现资源
我们的最佳实践是使用此方法来访问 RKE、RKE2 和 K3s集群。这样,万一你无法连接到 Rancher,你仍然可以访问该集群。
-
:::note 先决条件:
以下步骤假设你已经创建了一个 Kubernetes 集群,并按照步骤[从工作站使用 kubectl 连接到集群](#在工作站使用-kubectl-访问集群)。
@@ -78,7 +76,7 @@ CURRENT NAME CLUSTER AUTHINFO N
当 `kubectl` 正常工作时,它确认你可以绕过 Rancher 的身份验证代理访问集群。
-### 直接连接到定义了 FQDN 的集群
+## 直接连接到定义了 FQDN 的集群
如果集群定义了 FQDN,将会创建一个引用 FQDN 的上下文。上下文将命名为 `-fqdn`。当你想在没有 Rancher 的情况下使用 `kubectl` 访问这个集群时,你需要使用这个上下文。
@@ -87,20 +85,25 @@ CURRENT NAME CLUSTER AUTHINFO N
```
kubectl --context -fqdn get nodes
```
+
直接引用 kubeconfig 文件的位置:
+
```
kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods
```
-### 直接连接到未定义 FQDN 的集群
+## 直接连接到未定义 FQDN 的集群
如果集群没有定义 FQDN,则会创建额外的上下文来引用 controlplane 中每个节点的 IP 地址。每个上下文将被命名为 `-`。当你想在没有 Rancher 的情况下使用 `kubectl` 访问这个集群时,你需要使用这个上下文。
假设 kubeconfig 文件位于 `~/.kube/config`:
+
```
kubectl --context - get nodes
```
+
直接引用 kubeconfig 文件的位置:
+
```
kubectl --kubeconfig /custom/path/kube.config --context - get pods
```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md
index d881d6c968e..ea35bd9fc33 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/add-a-pod-security-policy.md
@@ -4,7 +4,7 @@ title: 添加 Pod 安全策略
:::note 先决条件:
-以下选项仅适用于[使用 RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+以下选项仅适用于[使用 RKE 启动的集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
@@ -18,7 +18,7 @@ title: 添加 Pod 安全策略
:::note
- 此选项仅适用于[由 RKE 配置的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+ 此选项仅适用于[由 RKE 配置的集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
index 4ed2eb4d44d..f5095790dfc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md
@@ -23,9 +23,9 @@ description: 了解从 Rancher 启动的 Kubernetes 集群中删除节点时的
| 在 `management.cattle.io` API Group 下创建的所有资源 | ✓ | ✓ | ✓ | |
| Rancher v2.x 创建的所有 CRD | ✓ | ✓ | ✓ | |
-[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md
-[2]: ../../../pages-for-subheaders/use-existing-nodes.md
-[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md
+[1]: ../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md
+[2]: ../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
+[3]: ../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md
[4]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
## 通过 Rancher UI 删除集群中的节点
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md
index 1d76f44b72c..1ec13e03630 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md
@@ -11,7 +11,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
本文假设你已了解 Kubernetes 的持久卷、持久卷声明和存储类的概念。如需更多信息,请参阅[存储的工作原理](manage-persistent-storage/about-persistent-storage.md)部分。
-### 先决条件
+## 先决条件
设置持久存储需要`管理卷`的[角色](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色参考)。
@@ -21,7 +21,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
如果要将现有的持久存储连接到集群,则不需要启用云提供商。
-### 设置现有存储
+## 设置现有存储
设置现有存储的总体流程如下:
@@ -32,7 +32,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
有关详细信息和先决条件,请参阅[此页面](manage-persistent-storage/set-up-existing-storage.md)。
-### 在 Rancher 中动态配置新存储
+## 在 Rancher 中动态配置新存储
配置新存储的总体流程如下:
@@ -42,7 +42,7 @@ description: "了解在 Kubernetes 中创建持久存储的两种方法:持久
有关详细信息和先决条件,请参阅[此页面](manage-persistent-storage/dynamically-provision-new-storage.md)。
-### Longhorn 存储
+## Longhorn 存储
[Longhorn](https://longhorn.io/) 是一个轻量级、可靠、易用的 Kubernetes 分布式块存储系统。
@@ -52,28 +52,28 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现
Rancher v2.5 简化了在 Rancher 管理的集群上安装 Longhorn 的过程。详情请参见[本页面](../../../../integrations-in-rancher/longhorn/longhorn.md)。
-### 配置存储示例
+## 配置存储示例
我们提供了如何使用 [NFS](../provisioning-storage-examples/nfs-storage.md), [vSphere](../provisioning-storage-examples/vsphere-storage.md),和 [Amazon 的 EBS](../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) 来配置存储的示例。
-### GlusterFS 卷
+## GlusterFS 卷
在将数据存储在 GlusterFS 卷上的集群中,你可能会遇到重启 `kubelet` 后 pod 无法挂载卷的问题。有关避免此情况发生的详细信息,请参阅[此页面](manage-persistent-storage/about-glusterfs-volumes.md)。
-### iSCSI 卷
+## iSCSI 卷
在将数据存储在 iSCSI 卷上的 [Rancher 启动的 Kubernetes 集群](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中,你可能会遇到 kubelet 无法自动连接 iSCSI 卷的问题。有关解决此问题的详细信息,请参阅[此页面](manage-persistent-storage/install-iscsi-volumes.md)。
-### hostPath 卷
+## hostPath 卷
在创建 hostPath 卷之前,你需要在集群配置中设置 [extra_bind](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds/)。这会将路径作为卷安装在你的 kubelet 中,可用于工作负载中的 hostPath 卷。
-### 将 vSphere Cloud Provider 从树内迁移到树外
+## 将 vSphere Cloud Provider 从树内迁移到树外
Kubernetes 正在逐渐不在树内维护云提供商。vSphere 有一个树外云提供商,可通过安装 vSphere 云提供商和云存储插件来使用。
有关如何从树内 vSphere 云提供商迁移到树外,以及如何在迁移后管理现有虚拟机,请参阅[此页面](../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md)。
-### 相关链接
+## 相关链接
- [Kubernetes 文档: 存储](https://kubernetes.io/docs/concepts/storage/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md
index 9a4c878d631..69dd77f4d7e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md
@@ -4,7 +4,7 @@ title: GlusterFS 卷
:::note
-本文仅适用于 [RKE 集群](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+本文仅适用于 [RKE 集群](../../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
index 6582e5e0f50..0d0b638fd10 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md
@@ -15,11 +15,11 @@ title: 在 Rancher 中动态配置新存储
1. [添加一个存储类并将其配置为使用你的存储](#1-添加一个存储类并将其配置为使用你的存储)
2. [为使用 StatefulSet 部署的 Pod 使用存储类](#2-为使用-statefulset-部署的-pod-使用存储类)
-### 先决条件
+## 先决条件
- 设置持久存储需要`管理卷`的[角色](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色参考)。
- 如果你要为云集群配置存储,则存储和集群主机必须使用相同的云提供商。
-- 必须启用云提供商。有关启用云提供商的详细信息,请参阅[此页面](../../../../../pages-for-subheaders/set-up-cloud-providers.md)。
+- 必须启用云提供商。有关启用云提供商的详细信息,请参阅[此页面](../../../kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。
- 确保你的存储卷插件可以启用。
默认情况下启用以下存储卷插件:
@@ -38,7 +38,7 @@ title: 在 Rancher 中动态配置新存储
如果你的存储卷插件没有在上述列表中,你需要[使用功能开关来启用不受支持的存储驱动](../../../../advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)。
-### 1. 添加一个存储类并将其配置为使用你的存储
+## 1. 添加一个存储类并将其配置为使用你的存储
这些步骤描述了如何在集群级别设置存储类:
@@ -55,7 +55,7 @@ title: 在 Rancher 中动态配置新存储
有关存储类参数的完整信息,请参阅官方 [Kubernetes 文档](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters)。
-### 2. 为使用 StatefulSet 部署的 Pod 使用存储类
+## 2. 为使用 StatefulSet 部署的 Pod 使用存储类
StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘性标识。在这个 StatefulSet 中,我们将配置一个 VolumeClaimTemplate。StatefulSet 管理的每个 Pod 都将部署一个基于此 VolumeClaimTemplate 的 PersistentVolumeClaim。PersistentVolumeClaim 将引用我们创建的 StorageClass。因此,在部署 StatefulSet 管理的每个 Pod 时,都会使用 PersistentVolumeClaim 中定义的 StorageClass 来绑定到动态配置的存储。
@@ -66,7 +66,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **StatefulSet**。
1. 在**卷声明模板**选项卡上,单击**添加声明模板**。
1. 输入持久卷的名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 点击**启动**。
@@ -80,7 +80,7 @@ StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘
1. 单击 **⋮ > 编辑配置**,转到使用由 StorageClass 配置的存储的工作负载。
1. 在**卷声明模板**中,单击**添加声明模板**。
1. 输入持久卷名称。
-1. 在*存储类*\*字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
+1. 在**存储类**字段中,选择将为此 StatefulSet 管理的 pod 动态配置存储的 StorageClass。
1. 在**挂载点**字段中,输入工作负载将用于访问卷的路径。
1. 单击**保存**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md
index ea7fde37298..aaa2d24f67f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md
@@ -2,7 +2,7 @@
title: iSCSI 卷
---
-在将数据存储在 iSCSI 卷上的 [Rancher 启动的 Kubernetes 集群](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中,你可能会遇到 kubelet 无法自动连接 iSCSI 卷的问题。成此问题的原因很可能是 iSCSI 启动器工具不兼容。你可以在每个集群节点上安装 iSCSI 启动器工具来解决此问题。
+在将数据存储在 iSCSI 卷上的 [Rancher 启动的 Kubernetes 集群](../../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中,你可能会遇到 kubelet 无法自动连接 iSCSI 卷的问题。造成此问题的原因很可能是 iSCSI 启动器工具不兼容。你可以在每个集群节点上安装 iSCSI 启动器工具来解决此问题。
将数据存储到 iSCSI 卷的由 Rancher 启动的 Kubernetes 集群使用 [iSCSI 启动器工具](http://www.open-iscsi.com/),该工具嵌入在 kubelet 的 `rancher/hyperkube` Docker 镜像中。该工具从每个 kubelet(即 _initiator_)发现并发起与 iSCSI 卷(即 _target_)的会话。但是,在某些情况下,initiator 和 target 上安装的 iSCSI 启动器工具的版本可能不匹配,从而导致连接失败。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
index 2232fc9c253..2b5575f7794 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md
@@ -16,12 +16,12 @@ title: 设置现有存储
2. [添加一个引用持久存储的 PersistentVolume](#2-添加一个引用持久存储的-persistentvolume)。
3. [为使用 StatefulSet 部署的 Pod 使用存储类](#3-为使用-statefulset-部署的-pod-使用存储类)
-### 先决条件
+## 先决条件
- 要将持久卷创建为 Kubernetes 资源,你必须具有`管理卷`的[角色。](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色参考)
- 如果你要为云集群配置存储,则存储和集群主机必须使用相同的云提供商。
-### 1. 设置持久存储
+## 1. 设置持久存储
在 Rancher 中创建持久卷不会创建存储卷。它只创建映射到现有卷的 Kubernetes 资源。因此,在你可以将持久卷创建为 Kubernetes 资源之前,你必须先配置存储。
@@ -29,7 +29,7 @@ title: 设置现有存储
如果你有一个块存储池并且不想使用云提供商,你可以使用 Longhorn 为 Kubernetes 集群提供持久存储。详情请参见[本页面](../../../../../integrations-in-rancher/longhorn.md)。
-### 2. 添加一个引用持久存储的 PersistentVolume
+## 2. 添加一个引用持久存储的 PersistentVolume
这些步骤描述了如何在 Kubernetes 的集群级别设置 PersistentVolume。
@@ -48,7 +48,7 @@ title: 设置现有存储
**结果**:已创建你的新持久卷。
-### 3. 为使用 StatefulSet 部署的 Pod 使用存储类
+## 3. 为使用 StatefulSet 部署的 Pod 使用存储类
StatefulSet 管理 Pod 的部署和扩展,同时为每个 Pod 维护一个粘性标识。在这个 StatefulSet 中,我们将配置一个 VolumeClaimTemplate。StatefulSet 管理的每个 Pod 都将部署一个基于此 VolumeClaimTemplate 的 PersistentVolumeClaim。PersistentVolumeClaim 将引用我们创建的 PersistentVolume。因此,在部署 StatefulSet 管理的每个 Pod 时,都会绑定一个 PersistentVolumeClaim 中定义的 PersistentVolume。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
index a262560e90f..4a684b2eabe 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md
@@ -234,7 +234,7 @@ title: 通过 AWS EC2 Auto Scaling 组使用 Cluster Autoscaler
我们配置 AWS 后,我们需要创建虚拟机来引导集群:
-* master (etcd+controlplane):根据需要部署三个适当大小的 master 实例。详情请参见[生产就绪集群的建议](../../../../pages-for-subheaders/checklist-for-production-ready-clusters.md)。
+* master (etcd+controlplane):根据需要部署三个适当大小的 master 实例。详情请参见[生产就绪集群的建议](../../kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md)。
* IAM 角色:`K8sMasterRole`
* 安全组:`K8sMasterSg`
* 标签:
@@ -300,7 +300,7 @@ title: 通过 AWS EC2 Auto Scaling 组使用 Cluster Autoscaler
| max-node-provision-time | "15m" | CA 等待节点配置的最长时间 |
| nodes | - | 以云提供商接受的格式设置节点组的最小、最大大小和其他配置数据。可以多次使用。格式是 `::`。 |
| node-group-auto-discovery | - | 节点组自动发现的一个或多个定义。定义表示为 `:[[=]]` |
-| estimator | - | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
+| estimator | "binpacking" | 用于扩容的资源评估器类型。可用值:["binpacking"] |
| expander | "random" | 要在扩容中使用的节点组扩展器的类型。可用值:`["random","most-pods","least-waste","price","priority"]` |
| ignore-daemonsets-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 DaemonSet pod |
| ignore-mirror-pods-utilization | false | CA 为了缩容而计算资源利用率时,是否应忽略 Mirror pod |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/manage-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/manage-clusters.md
index f7d9bb6b990..676c25fb2a9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/manage-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/manage-clusters.md
@@ -16,7 +16,7 @@ title: 集群管理
## 在 Rancher 中管理集群
-将集群[配置到 Rancher](../kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md) 之后,[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles)需要管理这些集群。管理集群的选项如下:
+将集群[配置到 Rancher](../kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md) 之后,[集群所有者](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)需要管理这些集群。管理集群的选项如下:
import ClusterCapabilitiesTable from '../../../shared-files/_cluster-capabilities-table.md';
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
index a942534d1c6..81b6ea17eec 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md
@@ -9,11 +9,11 @@ title: 节点和节点池
1. 找到要管理其节点的集群,然后单击行末尾的**浏览**按钮。
1. 从左侧导航中选择**节点**。
-不同的集群配置[选项](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)对应不同的可用节点选项。
+不同的集群配置[选项](../kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)对应不同的可用节点选项。
:::note
-如果你想管理 _集群_ 而不是单个节点,请参阅[编辑集群](../../../pages-for-subheaders/cluster-configuration.md)。
+如果你想管理 _集群_ 而不是单个节点,请参阅[编辑集群](../../../reference-guides/cluster-configuration/cluster-configuration.md)。
:::
@@ -32,9 +32,9 @@ title: 节点和节点池
| [下载密钥](#通过-ssh-连接到由基础设施提供商托管的节点) | ✓ | | | | | 下载 SSH 密钥以通过 SSH 连接到节点。 |
| [节点缩放](#扩缩节点) | ✓ | | | ✓ | | 向上或向下扩展节点池中的节点数。 |
-[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md
-[2]: ../../../pages-for-subheaders/use-existing-nodes.md
-[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md
+[1]: ../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md
+[2]: ../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
+[3]: ../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md
[4]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
[5]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md
@@ -43,17 +43,17 @@ title: 节点和节点池
### 由基础设施提供商托管的节点
-在[托管在基础设施提供商](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)中的节点上配置由 Rancher 启动的 Kubernetes 集群时,你可以使用节点池。
+在[托管在基础设施提供商](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)中的节点上配置由 Rancher 启动的 Kubernetes 集群时,你可以使用节点池。
-如果节点池被编辑,通过[节点池选项](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)配置的集群可以纵向扩容或缩容。
+如果节点池被编辑,通过[节点池选项](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点池)配置的集群可以纵向扩容或缩容。
-如果启用[节点自动替换功能](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点自动替换),节点池还可以自动维护在初始集群配置期间设置的节点规模。该规模决定了 Rancher 为集群维护的 active 节点的数量。
+如果启用[节点自动替换功能](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点自动替换),节点池还可以自动维护在初始集群配置期间设置的节点规模。该规模决定了 Rancher 为集群维护的 active 节点的数量。
-Rancher 使用[节点模板](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点模板)来替换节点池中的节点。每个节点模板都使用云提供商凭证来允许 Rancher 在基础设施提供商中设置节点。
+Rancher 使用[节点模板](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点模板)来替换节点池中的节点。每个节点模板都使用云提供商凭证来允许 Rancher 在基础设施提供商中设置节点。
### 由托管 Kubernetes 提供商配置的节点
-用于管理[由 Kubernetes 提供商托管](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)的节点的选项在 Rancher 中有些限制。例如,你不能使用 Rancher UI 向上或向下缩放节点数量,而是需要直接编辑集群。
+用于管理[由 Kubernetes 提供商托管](../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)的节点的选项在 Rancher 中有些限制。例如,你不能使用 Rancher UI 向上或向下缩放节点数量,而是需要直接编辑集群。
### 注册节点
@@ -72,13 +72,13 @@ Rancher 使用[节点模板](../../../pages-for-subheaders/use-new-nodes-in-an-i
## 在 Rancher API 中查看节点
-选择此选项以查看节点的 [API 端点](../../../pages-for-subheaders/about-the-api.md)。
+选择此选项以查看节点的 [API 端点](../../../api/quickstart.md)。
## 删除节点
使用 **Delete** 从云提供商中删除有缺陷的节点。
-当你删除有缺陷的节点时,如果该节点在节点池中并启用了[节点自动替换](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点自动替换),Rancher 可以自动将其替换为具有相同配置的节点。
+当你删除有缺陷的节点时,如果该节点在节点池中并启用了[节点自动替换](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点自动替换),Rancher 可以自动将其替换为具有相同配置的节点。
:::tip
@@ -88,11 +88,11 @@ Rancher 使用[节点模板](../../../pages-for-subheaders/use-new-nodes-in-an-i
## 扩缩节点
-对于由基础设施提供商托管的节点,你可以使用缩放控件来缩放每个[节点池](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#节点池)中的节点数量。此选项不适用于其他集群类型。
+对于由基础设施提供商托管的节点,你可以使用缩放控件来缩放每个[节点池](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点池)中的节点数量。此选项不适用于其他集群类型。
## 通过 SSH 连接到由基础设施提供商托管的节点
-对于[由基础设施提供商托管的节点](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md),你可以选择下载其 SSH 密钥,以便从桌面远程连接到它。
+对于[由基础设施提供商托管的节点](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md),你可以选择下载其 SSH 密钥,以便从桌面远程连接到它。
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,转到要通过 SSH 连接到节点的集群,然后单击集群名称。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
index f8e5d94e510..06bad6f6e14 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md
@@ -34,10 +34,10 @@ Kubernetes 支持由同一个物理集群支持的多个虚拟集群。这些虚
你可以将以下资源直接分配给命名空间:
-- [工作负载](../../../pages-for-subheaders/workloads-and-pods.md)
-- [负载均衡器/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md)
+- [工作负载](../kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)
+- [负载均衡器/Ingress](../kubernetes-resources-setup/load-balancer-and-ingress-controller/load-balancer-and-ingress-controller.md)
- [服务发现记录](../../new-user-guides/kubernetes-resources-setup/create-services.md)
-- [持久卷声明](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md)
+- [持久卷声明](./create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)
- [证书](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md)
- [ConfigMap](../../new-user-guides/kubernetes-resources-setup/configmaps.md)
- [镜像仓库](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md)
@@ -169,14 +169,14 @@ Rancher 在 Kubernetes 之上进行了扩展,除了集群级别之外,还允
### 4. 可选:添加资源配额
-资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+资源配额用于限制项目(及其命名空间)可以使用的资源。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
要添加资源配额:
1. 在**资源配额**选项卡中,单击**添加资源**。
-1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](projects-and-namespaces.md)。
+1. 选择一个**资源类型**。有关详细信息,请参阅[资源配额](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
1. 输入**项目限制**和**命名空间默认限制**的值。
-1. **可选**:指定**容器默认资源限制**,这将应用于项目中启动的所有容器。如果资源配额设置了 CPU 或内存限制,则建议使用该参数。可以在单个命名空间或容器级别上覆盖它。有关详细信息,请参阅[容器默认资源限制](../../../pages-for-subheaders/manage-project-resource-quotas.md)。
+1. **可选**:指定**容器默认资源限制**,这将应用于项目中启动的所有容器。如果资源配额设置了 CPU 或内存限制,则建议使用该参数。可以在单个命名空间或容器级别上覆盖它。有关详细信息,请参阅[容器默认资源限制](../../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)。
1. 单击**创建**。
**结果**:项目已创建。你可以从集群的**项目/命名空间**视图中查看它。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md
index 569a052d5fe..35b432d9f01 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/nfs-storage.md
@@ -6,7 +6,7 @@ title: NFS 存储
:::note
-- 如果你已经拥有 NFS 共享,则无需配置新的 NFS 服务器即可在 Rancher 中使用 NFS 卷插件。这样的话,你可以跳过此过程的其余部分并直接[添加存储](../../../../pages-for-subheaders/create-kubernetes-persistent-storage.md)。
+- 如果你已经拥有 NFS 共享,则无需配置新的 NFS 服务器即可在 Rancher 中使用 NFS 卷插件。这样的话,你可以跳过此过程的其余部分并直接[添加存储](../create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)。
- 此教程演示了如何使用 Ubuntu 设置 NFS 服务器。你也应该能够将这些说明用于其他 Linux 发行版(例如 Debian、RHEL、Arch Linux 等)。有关如何使用另一个 Linux 发行版创建 NFS 服务器的官方说明,请参阅发行版的文档。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md
index 1fb1c5d5e06..db36152d271 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/provisioning-storage-examples/vsphere-storage.md
@@ -6,12 +6,11 @@ title: vSphere 存储
为了在 vSphere 中动态调配存储,必须启用 vSphere 提供商。有关更多信息,请参阅[树外 vSphere](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere.md) 和[树内 vSphere](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere.md)。
+## 先决条件
-### 先决条件
+为了在 [Rancher Kubernetes Engine (RKE)](../../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 集群中配置 vSphere 卷,[vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) 必须在[集群选项](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)中显式启用。
-为了在 [Rancher Kubernetes Engine (RKE)](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群中配置 vSphere 卷,[vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) 必须在[集群选项](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)中显式启用。
-
-### 创建一个 StorageClass
+## 创建一个 StorageClass
:::tip
@@ -31,7 +30,7 @@ title: vSphere 存储
5. 可选地,你可以在**参数**下指定存储类的其他属性。有关详细信息,请参阅 [vSphere 存储文档](https://github.com/vmware-archive/vsphere-storage-for-kubernetes/blob/master/documentation/storageclass.md)。
5. 单击**创建**。
-### 创建使用 vSphere 卷的工作负载
+## 创建使用 vSphere 卷的工作负载
1. 在左侧导航栏中,单击**工作负载**。
1. 单击**创建**。
@@ -43,7 +42,7 @@ title: vSphere 存储
7. 在**挂载点**字段中指定路径。这是卷将安装在容器文件系统中的完整路径,例如 `/persistent`。
8. 单击**创建**。
-### 验证卷的持久性
+## 验证卷的持久性
1. 在左侧导航栏中,单击**工作负载 > Pod**。
1. 转到你刚刚创建的工作负载,然后单击 **⋮ > 执行命令行**。
@@ -58,7 +57,7 @@ title: vSphere 存储

-### 为什么使用 StatefulSet 替代 Deployment
+## 为什么使用 StatefulSet 替代 Deployment
对于消耗 vSphere 存储的工作负载,你应该始终使用 [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/),因为这种资源类型旨在解决 VMDK 块存储警告。
@@ -66,7 +65,7 @@ title: vSphere 存储
即使使用仅具有单个副本的 deployment 资源也可能在更新 deployment 时出现死锁情况。如果更新的 pod 被调度到不同的节点,由于 VMDK 仍然连接到另一个节点,因此 pod 将无法启动。
-### 相关链接
+## 相关链接
- [用于 Kubernetes 的 vSphere 存储](https://github.com/vmware-archive/vsphere-storage-for-kubernetes/tree/master/documentation)
- [Kubernetes 持久卷](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md
index 62550604077..be1431337b7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-certificates.md
@@ -47,7 +47,7 @@ title: 证书轮换
:::
-### 证书轮换
+## 证书轮换
Rancher 启动的 Kubernetes 集群能够通过 UI 轮换自动生成的证书。
@@ -62,7 +62,7 @@ Rancher 启动的 Kubernetes 集群能够通过 UI 轮换自动生成的证书
**结果**:将轮换所选证书,相关服务将重新启动以使用新证书。
-### 补充说明
+## 补充说明
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md
index cfebc6443aa..b8c52cf70c5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-clusters/rotate-encryption-key.md
@@ -2,7 +2,7 @@
title: 加密密钥轮换
---
-### RKE1 加密密钥轮换
+## RKE1 加密密钥轮换
1. 使用以下两个选项之一来启用加密密钥轮换:
@@ -30,7 +30,7 @@ title: 加密密钥轮换
-### RKE2 加密密钥轮换
+## RKE2 加密密钥轮换
_**v2.6.7 新功能**_
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md
index 1c8aeba0767..28580d3a601 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/how-to-guides/new-user-guides/manage-namespaces.md
@@ -8,10 +8,10 @@ title: 命名空间
可以直接分配给命名空间的资源包括:
-- [工作负载](../../pages-for-subheaders/workloads-and-pods.md)
-- [负载均衡器/Ingress](../../pages-for-subheaders/load-balancer-and-ingress-controller.md)
+- [工作负载](./kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)
+- [负载均衡器/Ingress](./kubernetes-resources-setup/load-balancer-and-ingress-controller/load-balancer-and-ingress-controller.md)
- [服务发现记录](kubernetes-resources-setup/create-services.md)
-- [持久卷声明](../../pages-for-subheaders/create-kubernetes-persistent-storage.md)
+- [持久卷声明](./manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)
- [证书](kubernetes-resources-setup/encrypt-http-communication.md)
- [ConfigMap](kubernetes-resources-setup/configmaps.md)
- [镜像仓库](kubernetes-resources-setup/kubernetes-and-docker-registries.md)
@@ -21,11 +21,11 @@ title: 命名空间
:::note
-如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](manage-namespaces.md),以确保你有权访问该命名空间。
+如果你使用 `kubectl`创建命名空间,由于 `kubectl` 不要求将新命名空间限定在你可以访问的项目内,因此你可能无法使用该命名空间。如果你的权限仅限于项目级别,则最好[通过 Rancher 创建命名空间](#创建命名空间),以确保你有权访问该命名空间。
:::
-### 创建命名空间
+## 创建命名空间
创建一个新的命名空间来隔离项目中的应用和资源。
@@ -40,13 +40,13 @@ title: 命名空间
1. 单击**集群 > 项目/命名空间**。
1. 转到要添加命名空间的项目,并单击**创建命名空间**。或者,你也可以转到**不在项目内**以创建不与项目关联的命名空间。
-1. **可选**:如果你的项目具有有效的[资源配额](../../pages-for-subheaders/manage-project-resource-quotas.md),你可以覆盖默认资源**限制**(限制命名空间可以使用的资源)。
+1. **可选**:如果你的项目具有有效的[资源配额](../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md),你可以覆盖默认资源**限制**(限制命名空间可以使用的资源)。
1. 输入**名称**,然后单击**创建**。
**结果**:已将命名空间添加到项目中。你可以开始将集群资源分配给命名空间。
-### 将命名空间移动到另一个项目
+## 将命名空间移动到另一个项目
在某些情况下(例如希望其他团队使用该应用时),集群管理员和成员可能需要将命名空间移动到另一个项目:
@@ -60,14 +60,14 @@ title: 命名空间
:::note 注意事项:
- 不要移动 `System` 项目中的命名空间。移动命名空间可能会对集群网络产生不利影响。
- - 你不能将命名空间移动到已配置[资源配额](../../pages-for-subheaders/manage-project-resource-quotas.md)的项目中。
+ - 你不能将命名空间移动到已配置[资源配额](../advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md)的项目中。
- 如果你将命名空间从已设置配额的项目移动到未设置配额的项目,则会删除该命名空间的配额。
1. 为新命名空间选择一个新项目,然后单击**移动**。你也可以选择**无**,从而将命名空间从所有项目中移除。
**结果**:你的命名空间已移至其他项目(或从所有项目中移除)。如果命名空间绑定了项目资源,命名空间会释放这些资源,然后绑定新项目的资源。
-### 编辑命名空间资源配额
+## 编辑命名空间资源配额
你可以覆盖命名空间默认限制,从而为特定命名空间提供对更多(或更少)项目资源的访问权限:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md
index 7a530783b36..2b85b101377 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/configuration-reference.md
@@ -10,7 +10,7 @@ title: 配置
1. 在**集群**页面上,转到要配置 CIS 扫描的集群,然后单击 **Explore**。
1. 在左侧导航栏中,单击 **CIS Benchmark**。
-### 扫描
+## 扫描
扫描是用来根据定义的配置文件,在集群上触发 CIS 扫描的。扫描完成后会创建一份报告。
@@ -27,7 +27,7 @@ spec:
scanProfileName: rke-profile-hardened
```
-### 配置文件
+## 配置文件
配置文件包含 CIS 扫描的配置,包括要使用的 Benchmark 测试版本以及要在该 Benchmark 测试中跳过的测试。
@@ -62,7 +62,7 @@ spec:
- "1.1.21"
```
-### Benchmark 版本
+## Benchmark 版本
Benchmark 版本是指使用 `kube-bench` 运行的 Benchmark 名称,以及该 Benchmark 的有效配置参数。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md
index 6818ac56118..8cd3c582736 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cis-scans/custom-benchmark.md
@@ -13,7 +13,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
按照以下所有步骤添加自定义 Benchmark 版本并使用它运行扫描。
-### 1. 准备自定义 Benchmark 版本 ConfigMap
+## 1. 准备自定义 Benchmark 版本 ConfigMap
要创建自定义 Benchmark 版本,你需要先创建一个包含 Benchmark 版本配置文件的 ConfigMap,并将其上传到要运行扫描的 Kubernetes 集群。
@@ -38,7 +38,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
kubectl create configmap -n foo --from-file=
```
-### 2. 将自定义 Benchmark 版本添加到集群
+## 2. 将自定义 Benchmark 版本添加到集群
1. 在左上角,单击 **☰ > 集群管理**。
1. 在**集群**页面上,转到要添加自定义 Benchmark 的集群,然后单击 **Explore**。
@@ -50,7 +50,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
1. 添加最低和最高 Kubernetes 版本限制(如果有)。
1. 单击**创建**。
-### 3. 为自定义 Benchmark 版本创建新配置文件
+## 3. 为自定义 Benchmark 版本创建新配置文件
要使用你的自定义 Benchmark 版本运行扫描,你需要添加一个指向此 Benchmark 版本的新配置文件:
@@ -62,7 +62,7 @@ title: 为集群扫描创建自定义 Benchmark 版本
1. 在下拉列表中选择 Benchmark 版本。
1. 单击**创建**。
-### 4. 使用自定义 Benchmark 版本运行扫描
+## 4. 使用自定义 Benchmark 版本运行扫描
指向你的自定义 Benchmark 版本的 `foo` 配置文件创建完成后,你可以创建一个新的扫描,从而在 Benchmark 版本中运行自定义测试。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md
index 8431d4fbd66..5a6fed5b32a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md
@@ -2,7 +2,7 @@
title: 先决条件
---
-### 1. 设置许可证管理器和购买支持
+## 1. 设置许可证管理器和购买支持
首先,完成许可证管理器设置的[第一步](https://docs.aws.amazon.com/license-manager/latest/userguide/getting-started.html)。
然后,转到 AWS Marketplace。找到 “Rancher Premium Support Billing Container Starter Pack”。最后,购买至少一项 Entitlement。
@@ -11,7 +11,7 @@ title: 先决条件
> **注意**:每项 Entitlement 都对一定数量的节点授予访问支持的权限。你可以后续根据需要购买更多许可证。
-### 2. 创建 EKS 集群
+## 2. 创建 EKS 集群
按照 [Rancher 文档](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md)创建 EKS 集群。进行到[安装 Rancher Helm Chart](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md#8-安装-rancher-helm-chart)(最后一步)时,**停止并返回此页面**。该集群需要满足以下要求:
- EKS 1.22 版本。
@@ -20,7 +20,7 @@ title: 先决条件
- 集群中的每个节点都可以访问许可证管理器服务。
- 集群中的每个节点都可以访问 STS 服务的全局端点。
-### 3. 安装 Rancher
+## 3. 安装 Rancher
除了在 [Rancher 文档](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md#8-安装-rancher-helm-chart)中指定的 Rancher 安装选项外,你还需要启用其它指标。
你可以通过 Helm CLI 使用以下选项来完成:
@@ -39,11 +39,11 @@ extraEnv:
你还需要安装 Rancher 2.6.7 或更高版本。
-### 4. 创建 OIDC 提供程序
+## 4. 创建 OIDC 提供程序
按照 [AWS 文档](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)为上一节中指定的集群创建 OIDC 提供程序。
-### 5. 创建 IAM 角色
+## 5. 创建 IAM 角色
CSP Adapter 需要 IAM 角色才能签入/签出 Entitlement。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
index 831bf952a6d..8e3d743ef27 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md
@@ -4,7 +4,7 @@ title: 安装 Adapter
> **重要提示**:如果你尝试重新安装 Adapter,你可能会在长达一小时的时间内收到不合规的错误消息。
-### Rancher 与 Adapter 的兼容性矩阵
+## Rancher 与 Adapter 的兼容性矩阵
:::note 重要提示:
@@ -23,7 +23,7 @@ title: 安装 Adapter
| v2.7.5 | v2.0.2 |
-### 1. 获取对 Local 集群的访问权限
+## 1. 获取对 Local 集群的访问权限
> **注意**:只有管理员用户才能访问 Local 集群。因为 CSP Adapter 必须安装在 Local 集群中,所以此安装必须由管理员用户执行。
@@ -33,7 +33,7 @@ title: 安装 Adapter
export KUBECONFIG=$TOKEN_PATH
```
-### 2. 创建 Adapter 命名空间
+## 2. 创建 Adapter 命名空间
创建要安装 Adapter 的命名空间:
@@ -41,7 +41,7 @@ export KUBECONFIG=$TOKEN_PATH
kubectl create ns cattle-csp-adapter-system
```
-### 3. 创建证书密文
+## 3. 创建证书密文
Adapter 需要访问 Rancher 用来与 Rancher Server 通信的根 CA。有关 Rancher 支持的证书选项的更多信息,请参阅 [Chart 选项页面](../../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md)。
@@ -63,7 +63,7 @@ kubectl -n cattle-csp-adapter-system create secret generic tls-ca-additional --f
> **重要提示**:不要更改文件名或创建的密文的名称,否则可能会导致 Adapter 运行出错。
-### 4. 安装 Chart
+## 4. 安装 Chart
首先,使用以下命令添加 `rancher/charts` 仓库:
@@ -134,7 +134,7 @@ helm install rancher-csp-adapter rancher-charts/rancher-csp-adapter -f values.ya
-### 5. 管理证书更新
+## 5. 管理证书更新
如果你在[步骤 3](#3-创建证书密文) 中创建了一个用于存储自定义证书的密文,则随着证书的轮换,你将需要更新此密文。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md
index f290d881ae9..ddc19f8353e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/uninstall-adapter.md
@@ -2,19 +2,19 @@
title: 卸载 Adapter
---
-### 1. 使用 Helm 卸载 Adapter Chart:
+## 1. 使用 Helm 卸载 Adapter Chart:
```bash
helm uninstall rancher-csp-adapter -n cattle-csp-adapter-system
```
-### 2. 删除为 Adapter 创建的命名空间:
+## 2. 删除为 Adapter 创建的命名空间:
```bash
kubectl delete ns cattle-csp-adapter-system
```
-### 3. (可选)删除未完成的用户通知:
+## 3. (可选)删除未完成的用户通知:
```bash
kubectl delete RancherUserNotification csp-compliance
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md
index f5f0d722487..d34958468e7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cloud-marketplace/supportconfig.md
@@ -8,7 +8,7 @@ title: Supportconfig Bundle
> **注意**:无论采用何种方法,只有管理员可以生成/下载 Supportconfig Bundle。
-### 通过 Rancher 访问
+## 通过 Rancher 访问
首先,点击汉堡菜单。然后单击 `Get Support` 按钮。
@@ -20,7 +20,7 @@ title: Supportconfig Bundle

-### 不通过 Rancher 进行访问
+## 不通过 Rancher 进行访问
首先,为安装 Rancher 的集群生成 kubeconfig。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cluster-api/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cluster-api/overview.md
index 5da4470bfd6..531464d5d9b 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cluster-api/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/cluster-api/overview.md
@@ -185,7 +185,7 @@ stringData:
:::note
-请记住,如果使用此安装选项,你必须自行管理 CAPI Operator 的安装。你可以参照 Rancher Turtles 文档中的 [CAPI Operator 指南](https://turtles.docs.rancher.com/tasks/capi-operator/intro)
+请记住,如果使用此安装选项,你必须自行管理 CAPI Operator 的安装。你可以参照 Rancher Turtles 文档中的 [CAPI Operator 指南](https://turtles.docs.rancher.com/contributing/install_capi_operator)
:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/fleet/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/fleet/overview.md
index 55b89b8ee08..7f2c1f26083 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/fleet/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/fleet/overview.md
@@ -12,7 +12,7 @@ Fleet 是 Rancher 的一个独立项目,可以通过 Helm 安装在任何 Kube
## 架构
-有关 Fleet 如何运作的信息,请参阅[架构](./architecture)页面。
+有关 Fleet 如何运作的信息,请参阅[架构](./architecture.md)页面。
## 在 Rancher UI 中访问 Fleet
@@ -39,7 +39,7 @@ Fleet 预安装在 Rancher 中,并由 Rancher UI 中的**持续交付**选项
## Windows 支持
-有关对具有 Windows 节点的集群的支持的详细信息,请参阅 [Windows 支持](./windows-support)页面。
+有关对具有 Windows 节点的集群的支持的详细信息,请参阅 [Windows 支持](./windows-support.md)页面。
## GitHub 仓库
@@ -47,7 +47,7 @@ Fleet Helm charts 可在[此处](https://github.com/rancher/fleet/releases)获
## 在代理后使用 Fleet
-有关在代理后面使用 Fleet 的详细信息,请参阅[在代理后使用 Fleet](./use-fleet-behind-a-proxy)页面。
+有关在代理后面使用 Fleet 的详细信息,请参阅[在代理后使用 Fleet](./use-fleet-behind-a-proxy.md)页面。
## Helm Chart 依赖
@@ -57,7 +57,7 @@ git 仓库中的 Helm Chart 必须在 Chart 子目录中包含其依赖。 你
## 故障排除
-- **已知问题**:Fleet gitrepos 的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator) 创建的备份或恢复中。一旦有永久的解决方案,我们将更新社区内容。
+- **已知问题**:Fleet gitrepos 的 clientSecretName 和 helmSecretName 密文不包含在 [backup-restore-operator](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-安装-rancher-backup-operator) 创建的备份或恢复中。一旦有永久的解决方案,我们将更新社区内容。
- **临时解决方法**:默认情况下,用户定义的密文不会在 Fleet 中备份。如果执行灾难恢复或将 Rancher 迁移到新集群,则有必要重新创建密文。要修改 ResourceSet 以包含要备份的额外资源,请参阅文档[此处](https://github.com/rancher/backup-restore-operator#user-flow)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester.md
index 81823b2999a..eb6bf4742bb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester.md
@@ -4,9 +4,9 @@ title: Harvester 集成
Harvester 是 Rancher 2.6.1 新增的功能,[Harvester](https://docs.harvesterhci.io/) 是基于 Kubernetes 构建的开源超融合基础架构 (HCI) 软件。Harvester 安装在裸金属服务器上,提供集成的虚拟化和分布式存储功能。虽然 Harvester 使用 Kubernetes 运行,但它不需要用户了解 Kubernetes 概念,因此是一个更加用户友好的应用。
-### 功能开关
+## 功能开关
-你可以使用 Harvester 的功能开关来管理 Harvester 在 Rancher 虚拟化管理页面的访问,用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。Harvester 的功能开关是默认启用的。如需了解 Rancher 中功能开关的更多详细信息,请单击[此处](../pages-for-subheaders/enable-experimental-features.md)。
+你可以使用 Harvester 的功能开关来管理 Harvester 在 Rancher 虚拟化管理页面的访问,用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。Harvester 的功能开关是默认启用的。如需了解 Rancher 中功能开关的更多详细信息,请单击[此处](../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
要导航到 Harvester 集群,请单击 **☰ > 虚拟化管理**。在 **Harvester 集群**页面中,单击集群以转到该 Harvester 集群的视图。
@@ -18,15 +18,15 @@ Harvester 是 Rancher 2.6.1 新增的功能,[Harvester](https://docs.harvester
* 用户只能在**虚拟化管理**页面上导入 Harvester 集群。在**集群管理**页面上导入集群是不支持的,而且会出现警告。建议你返回**虚拟化管理**页面执行此操作。
-### Harvester 主机驱动
+## Harvester 主机驱动
[Harvester 主机驱动](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/) 通常可用于 Rancher 中的 RKE 和 RKE2 选项。无论 Harvester 功能开关是否启用,主机驱动都是可用的。请注意,默认情况下主机驱动是关闭的。用户只能通过**集群管理**页面在 Harvester 上创建 RKE 或 RKE2 集群。
Harvester 允许通过 Harvester UI 上传和显示 `.ISO` 镜像,但 Rancher UI 不支持。这是因为 `.ISO` 镜像通常需要额外的设置,这会干扰干净的部署(即无需用户干预),并且它们通常不用于云环境。
-如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../pages-for-subheaders/about-provisioning-drivers.md#主机驱动)。
+如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#主机驱动)。
-### 端口要求
+## 端口要求
可以在[此处](https://docs.harvesterhci.io/v1.1/install/requirements#networking)找到 Harvester 集群的端口要求。
@@ -37,7 +37,7 @@ Harvester 允许通过 Harvester UI 上传和显示 `.ISO` 镜像,但 Rancher
对于其他集群(例如 K3s 和 RKE1)的其他端口要求,请参阅[这些文档](https://docs.harvesterhci.io/v1.1/install/requirements/#guest-clusters)。
-### 限制
+## 限制
---
**仅适用于 Rancher v2.6.1 和 v2.6.2**:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester/overview.md
index ac720bec060..b5ee0ff89eb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/harvester/overview.md
@@ -8,7 +8,7 @@ title: 概述
[Harvester](https://docs.harvesterhci.io/) 是 Rancher v2.6.1 新增的功能,是基于 Kubernetes 构建的开源超融合基础架构(HCI)软件。Harvester 安装在裸金属服务器上,提供集成的虚拟化和分布式存储功能。虽然 Harvester 使用 Kubernetes 运行,但它不需要用户了解 Kubernetes 概念,这使得它更加用户友好。
-### 功能开关
+## 功能开关
Harvester 功能开关用于管理对 Rancher 中虚拟化管理(VM)页面的访问,用户可以直接导航到 Harvester 集群并访问 Harvester UI。Harvester 的功能开关默认启用。如需了解 Rancher 中功能开关的更多详细信息,请单击[此处](../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
@@ -22,15 +22,15 @@ Harvester 功能开关用于管理对 Rancher 中虚拟化管理(VM)页面
- 用户只能在虚拟化管理页面上导入 Harvester 集群。不支持在集群管理页面上导入集群,并且会出现警告,建议你返回虚拟化管理页面执行此操作。
-### Harvester 主机驱动
+## Harvester 主机驱动
[Harvester 主机驱动](https://docs.harvesterhci.io/v1.1/rancher/node/node-driver/)通常可用于 Rancher 中的 RKE 和 RKE2 选项。无论 Harvester 功能开关是否启用,主机驱动都是可用的。请注意,主机驱动默认处于关闭状态。用户只能通过集群管理页面在 Harvester 上创建 RKE 或 RKE2 集群。
Harvester 允许通过 Harvester UI 上传和显示 `.ISO` 镜像,但 Rancher UI 是不支持的。这是因为 `.ISO` 镜像通常需要额外的设置,这会干扰干净的部署(即无需用户干预),并且它们通常不用于云环境。
-如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers#主机驱动)。
+如需了解 Rancher 中主机驱动的更多详细信息,请单击[此处](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md#主机驱动)。
-### 端口要求
+## 端口要求
Harvester 集群的端口要求可以在[此处](https://docs.harvesterhci.io/v1.1/install/requirements#networking)找到。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/integrations-in-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/integrations-in-rancher.md
new file mode 100644
index 00000000000..24f35f24e37
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/integrations-in-rancher.md
@@ -0,0 +1,18 @@
+---
+title: Rancher 中的集成
+---
+
+
+
+
+
+Prime 是 Rancher 生态系统的企业级产品,具有更高的安全性、更长的生命周期和对 Prime 专有文档的访问权限。Rancher Prime 安装资产托管在受信任的 SUSE 注册表上,由 Rancher 拥有和管理。受信任的 Prime 注册表仅包括经过社区测试的稳定版本。
+
+Prime 还提供生产支持选项,以及根据你的商业需求定制的订阅附加组件。
+
+要了解更多信息并开始使用 Rancher Prime,请访问[本页](https://www.rancher.com/quick-start)。
+
+import DocCardList from '@theme/DocCardList';
+import { useCurrentSidebarCategory } from '@docusaurus/theme-common/internal';
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/integrations-in-rancher.mdx b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/integrations-in-rancher.mdx
deleted file mode 100644
index c0a824cc9da..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/integrations-in-rancher.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: Rancher 中的集成
----
-
-
-
-
-
-import { Card, CardSection } from "@site/src/components/CardComponents";
-import { RocketRegular } from "@fluentui/react-icons";
-
-Prime 是 Rancher 生态系统的企业级产品,具有更高的安全性、更长的生命周期和对 Prime 专有文档的访问权限。Rancher Prime 安装资产托管在受信任的 SUSE 注册表上,由 Rancher 拥有和管理。受信任的 Prime 注册表仅包括经过社区测试的稳定版本。
-
-Prime 还提供生产支持选项,以及根据你的商业需求定制的订阅附加组件。
-
-要了解更多信息并开始使用 Rancher Prime,请访问[本页](https://www.rancher.com/quick-start)。
-
- }>
-
-
-
-
-
-
-
-
-
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/configuration-options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/configuration-options.md
index 800b4e576da..e0097356be2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/configuration-options.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/configuration-options.md
@@ -6,15 +6,15 @@ title: 配置选项
-### Egress 支持
+## Egress 支持
默认情况下,Egress 网关是禁用的,但你可以在安装或升级时使用 values.yaml 或[覆盖文件](#覆盖文件)启用它。
-### 启用自动 Sidecar 注入
+## 启用自动 Sidecar 注入
默认情况下,自动 sidecar 注入是禁用的。要启用此功能,请在安装或升级时在 values.yaml 中设置 `sidecarInjectorWebhook.enableNamespacesByDefault=true`。这会自动将 Istio sidecar 注入到所有已部署的新命名空间。
-### 覆盖文件
+## 覆盖文件
覆盖文件用于为 Istio 进行更广泛的配置。它允许你更改 [IstioOperator API](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/) 中可用的任何值。你可以自定义默认安装以满足你的需求。
@@ -22,7 +22,7 @@ title: 配置选项
有关覆盖文件的更多信息,请参阅 [Istio 文档](https://istio.io/latest/docs/setup/install/istioctl/#configure-component-settings)
-### 选择器和抓取配置
+## 选择器和抓取配置
Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=false`,即在默认情况下跨所有命名空间进行监控。这样,你可以查看部署在具有 `istio-injection=enabled` 标签的命名空间中的资源的流量、指标和图。
@@ -30,14 +30,14 @@ Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=f
详情请参阅[本节](selectors-and-scrape-configurations.md)。
-### 在具有 Pod 安全策略的情况下启用 Istio
+## 在具有 Pod 安全策略的情况下启用 Istio
详情请参阅[本节](pod-security-policies.md)。
-### 在 RKE2 集群上安装 Istio 的其他步骤
+## 在 RKE2 集群上安装 Istio 的其他步骤
详情请参阅[本节](install-istio-on-rke2-cluster.md)。
-### 项目网络隔离的其他步骤
+## 项目网络隔离的其他步骤
详情请参阅[本节](project-network-isolation.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/pod-security-policies.md
index 2ceed9acdce..44370e67421 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/pod-security-policies.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/pod-security-policies.md
@@ -19,7 +19,7 @@ Istio CNI 插件不再要求每个应用 pod 具有特权 `NET_ADMIN` 容器。
2. [启用 CNI](#2-启用-cni)
3. [验证 CNI 是否正常工作](#3-验证-cni-是否正常工作)
-### 1. 将 PodSecurityPolicy 设置为不受限制
+## 1. 将 PodSecurityPolicy 设置为不受限制
不受限制的 PSP 支持安装 Istio。
@@ -31,7 +31,7 @@ Istio CNI 插件不再要求每个应用 pod 具有特权 `NET_ADMIN` 容器。
1. 找到**项目: System**,然后选择 **⋮ > 编辑配置**。
1. 将 Pod 安全策略选项更改为不受限制,然后单击**保存**。
-### 2. 启用 CNI
+## 2. 启用 CNI
通过 **Apps** 安装或升级 Istio 时:
@@ -47,7 +47,7 @@ istio_cni.enabled: true
在集群中启用 CNI 后,Istio 应该能成功安装。
-### 3. 验证 CNI 是否正常工作
+## 3. 验证 CNI 是否正常工作
通过部署[示例应用](https://istio.io/latest/docs/examples/bookinfo/)或部署你自己的应用,来验证 CNI 是否正常工作。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md
index 5a40d032482..9828c2b351d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md
@@ -9,7 +9,7 @@ Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=f
如果你想将 Prometheus 限制为特定的命名空间,请设置 `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`。完成此操作后,你需要添加其他配置来继续监控你的资源。
-### 通过将 ignoreNamespaceSelectors 设置为 True 来限制对特定命名空间的监控
+## 通过将 ignoreNamespaceSelectors 设置为 True 来限制对特定命名空间的监控
要限制对特定命名空间的监控,你需要编辑 `ignoreNamespaceSelectors` Helm Chart 选项。你可以在安装或升级 Monitoring Helm Chart 时配置此选项:
@@ -18,14 +18,14 @@ Monitoring 应用设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=f
**结果**:Prometheus 将仅用于特定命名空间。换言之,你需要设置以下配置之一才能继续在各种仪表板中查看数据。
-### 让 Prometheus 检测其他命名空间中的资源
+## 让 Prometheus 检测其他命名空间中的资源
如果设置了 `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`,则有两种方法让 Prometheus 检测其他命名空间中的资源:
- **监控特定的命名空间**:在命名空间中添加一个 ServiceMonitor 或 PodMonitor 以及要抓取的目标。
- **跨命名空间监控**:将 `additionalScrapeConfig` 添加到你的 rancher-monitoring 实例,从而抓取所有命名空间中的所有目标。
-### 监控特定命名空间:创建 ServiceMonitor 或 PodMonitor
+## 监控特定命名空间:创建 ServiceMonitor 或 PodMonitor
此选项用于定义在特定命名空间中要监控的服务或 pod。
@@ -81,7 +81,7 @@ spec:
targetLabel: pod_name
```
-### 跨命名空间监控:将 ignoreNamespaceSelectors 设置为 False
+## 跨命名空间监控:将 ignoreNamespaceSelectors 设置为 False
此设置为 Prometheus 提供额外的抓取配置来实现跨命名空间监控。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/cpu-and-memory-allocations.md
index 3a19c21dcc5..c1aad59c6ee 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/cpu-and-memory-allocations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/istio/cpu-and-memory-allocations.md
@@ -41,7 +41,7 @@ Kubernetes 中的资源请求指的是,除非该节点至少具有指定数量
1. 在左侧导航栏中,点击 **Apps**。
1. 点击**已安装的应用**。
1. 转到 `istio-system` 命名空间。在某个 Istio 工作负载中(例如 `rancher-istio`),点击**⋮ > 编辑/升级**。
-1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](../../pages-for-subheaders/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](cpu-and-memory-allocations.md#编辑覆盖文件)。
+1. 点击**升级**,然后通过更改 values.yaml 或添加[覆盖文件](./configuration-options/configuration-options.md#覆盖文件)来编辑基本组件。有关编辑覆盖文件的更多信息,请参阅[本节](#编辑覆盖文件)。
1. 更改 CPU 或内存分配、调度各个组件的节点,或节点容忍度。
1. 点击**升级**。然后,更改就能启用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md
index b45cce6b4d7..61916c318be 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/kubernetes-distributions/kubernetes-distributions.md
@@ -13,7 +13,7 @@ K3s 是一款轻量级、完全兼容的 Kubernetes 发行版,专为一系列
### K3s 与 Rancher
- Rancher 允许在一系列平台上轻松配置 K3s,包括 Amazon EC2、DigitalOcean、Azure、vSphere 或现有服务器。
-- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup#cluster-management-capabilities-by-cluster-type)。
+- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md#按集群类型划分的集群管理功能)。
## RKE2
@@ -31,4 +31,4 @@ RKE2 的主要特性包括:
## RKE2 与 Rancher
- Rancher 允许在一系列平台上轻松配置 RKE2,包括 Amazon EC2、DigitalOcean、Azure、vSphere 或现有服务器。
-- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup#cluster-management-capabilities-by-cluster-type)。
+- Kubernetes 集群的标准 Rancher 管理,包括所有概述[集群管理功能](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md#按集群类型划分的集群管理功能)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md
index ab7f461150a..3f02e5fc4f3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md
@@ -4,7 +4,7 @@ title: Flows 和 ClusterFlows
有关如何配置 `Flow` 和 `ClusterFlow` 的完整详细信息,请参阅 [Logging Operator 文档](https://kube-logging.github.io/docs/configuration/flow/)。
-有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../../../pages-for-subheaders/logging.md#日志缓冲区导致-pod-过载)。
+有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../logging.md#日志缓冲区导致-pod-过载)。
## Flows
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md
index 1ad5b65f48b..8e7d608e4a5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md
@@ -4,7 +4,7 @@ title: Outputs 和 ClusterOutputs
有关如何配置 `Flow` 和 `ClusterFlow` 的完整详细信息,请参阅 [Logging Operator 文档](https://kube-logging.github.io/docs/configuration/flow/)。
-有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../../../pages-for-subheaders/logging.md#日志缓冲区导致-pod-过载)。
+有关如何解决 Logging 缓冲区的内存问题,请参阅 [Rancher 与 Logging 服务的集成:故障排除](../logging.md#日志缓冲区导致-pod-过载)。
## Outputs
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md
index c7e0510ebe9..cbaeda896e3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-architecture.md
@@ -6,7 +6,7 @@ title: 架构
有关 Logging Operator 工作原理的更多详细信息,请参阅[官方文档](https://kube-logging.github.io/docs/#architecture)。
-### Logging Operator 工作原理
+## Logging Operator 工作原理
Logging Operator 自动部署和配置 Kubernetes 日志流水线。它会在每个节点上部署和配置一个 Fluent Bit DaemonSet,从而收集节点文件系统中的容器和应用程序日志。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md
index 5c08fb1166a..40bae8aa4de 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/logging-helm-chart-options.md
@@ -2,7 +2,7 @@
title: rancher-logging Helm Chart 选项
---
-### 启用/禁用 Windows 节点 Logging
+## 启用/禁用 Windows 节点 Logging
要启用或禁用 Windows 节点 Logging,你可以在 `values.yaml` 中将 `global.cattle.windows.enabled` 设置为 `true` 或 `false`。
@@ -17,7 +17,7 @@ title: rancher-logging Helm Chart 选项
:::
-### 使用自定义 Docker 根目录
+## 使用自定义 Docker 根目录
如果使用了自定义 Docker 根目录,你可以在 `values.yaml` 中设置 `global.dockerRootDirectory`。
@@ -27,11 +27,11 @@ title: rancher-logging Helm Chart 选项
如果集群中有任何 Windows 节点,则更改将不适用于这些节点。
-### 为自定义污点添加 NodeSelector 设置和容忍度
+## 为自定义污点添加 NodeSelector 设置和容忍度
你可以添加 `nodeSelector` 设置,并通过编辑 Logging Helm Chart 值来添加其他`容忍度`。有关详细信息,请参阅[此页面](taints-and-tolerations.md)。
-### 启用 Logging 应用程序以使用 SELinux
+## 启用 Logging 应用程序以使用 SELinux
:::note 要求:
@@ -41,11 +41,11 @@ Logging v2 已在 RHEL/CentOS 7 和 8 上使用 SELinux 进行了测试。
[安全增强型 Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) 是对 Linux 的安全增强。被政府机构使用之后,SELinux 已成为行业标准,并在 CentOS 7 和 8 上默认启用。
-要配合使用 Logging V2 与 SELinux,我们建议你根据[此说明](../../pages-for-subheaders/selinux-rpm.md)安装 `rancher-selinux` RPM。
+要配合使用 Logging V2 与 SELinux,我们建议你根据[此说明](../../reference-guides/rancher-security/selinux-rpm/selinux-rpm.md)安装 `rancher-selinux` RPM。
然后,在安装 Logging 应用程序时,在 `values.yaml` 中将 `global.seLinux.enabled` 更改为 `true`,使 Chart 支持 SELinux。
-### 其他日志来源
+## 其他日志来源
默认情况下,Rancher 会收集所有类型集群的 [controlplane 组件](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components)和[节点组件](https://kubernetes.io/docs/concepts/overview/components/#node-components)的日志。
@@ -68,7 +68,7 @@ Logging v2 已在 RHEL/CentOS 7 和 8 上使用 SELinux 进行了测试。
如果你已经使用了云提供商的日志解决方案,例如 AWS CloudWatch 或 Google Cloud Operations Suite(以前称为 Stackdriver),由于原生解决方案可以不受限制地访问所有日志,因此你无需启用此选项。
-### Systemd 配置
+## Systemd 配置
在 Rancher Logging 中,你必须为 K3s 和 RKE2 Kubernetes 发行版配置 `SystemdLogPath`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md
index 16a6a08fd26..28724022e0a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/logging/taints-and-tolerations.md
@@ -16,7 +16,7 @@ title: 处理污点和容忍度
- [为自定义污点添加 NodeSelector 设置和容忍度](#为自定义污点添加-nodeselector-设置和容忍度)
-### Rancher 日志堆栈中的默认实现
+## Rancher 日志堆栈中的默认实现
默认情况下,Rancher 使用 `cattle.io/os=linux` 来将污点应用到所有 Linux 节点,而不影响 Windows 节点。
日志堆栈 pod 具有针对此污点的`容忍度`,因此它们能够运行在 Linux 节点上。
@@ -43,7 +43,7 @@ spec:
你可以对 Rancher 现有的污点或你自己的自定义污点执行相同的操作。
-### 为自定义污点添加 NodeSelector 设置和容忍度
+## 为自定义污点添加 NodeSelector 设置和容忍度
如果要添加你自己的 `nodeSelector` 设置,或者要为其他污点添加 `容忍度`,你可以将以下内容传递给 Chart 的值:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn.md
index ea34d0938ae..53ffddc28d2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn.md
@@ -21,7 +21,7 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现

-### 使用 Rancher 安装 Longhorn
+## 使用 Rancher 安装 Longhorn
1. 满足所有[安装要求](https://longhorn.io/docs/latest/deploy/install/#installation-requirements)。
1. 转到要安装 Longhorn 的集群。
@@ -33,14 +33,14 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现
**结果**:Longhorn 已部署到 Kubernetes 集群中。
-### 从 Rancher UI 访问 Longhorn
+## 从 Rancher UI 访问 Longhorn
1. 转到安装了 Longhorn 的集群。在左侧导航菜单中,单击 **Longhorn**。
1. 在此页面上,你可以编辑 Longhorn 管理的 Kubernetes 资源。要查看 Longhorn UI,请单击**概述**中的 **Longhorn** 按钮。
**结果**:你将转到 Longhorn UI,你可以在那里管理 Longhorn 卷及其在 Kubernetes 集群中的副本,还可以查看位于另一个 Kubernetes 集群或 S3 中的 Longhorn 存储辅助备份。
-### 从 Rancher UI 卸载 Longhorn
+## 从 Rancher UI 卸载 Longhorn
1. 转到安装了 Longhorn 的集群,然后单击 **Apps**。
1. 点击**已安装的应用**。
@@ -49,15 +49,15 @@ Longhorn 是免费的开源软件。Longhorn 最初由 Rancher Labs 开发,现
**结果**:Longhorn 已被卸载。
-### GitHub 仓库
+## GitHub 仓库
Longhorn 项目在[此处](https://github.com/longhorn/longhorn)。
-### 文档
+## 文档
Longhorn 文档在[此处](https://longhorn.io/docs/)。
-### 架构
+## 架构
Longhorn 为每个卷创建专用的存储控制器,并在存储在多个节点上的多个副本之间同步复制该卷。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn/overview.md
index b5298c3c21d..dd02b06d713 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/longhorn/overview.md
@@ -25,7 +25,7 @@ Longhorn 是免费的开源软件。它最初由 Rancher Labs 开发,现在被

-### 使用 Rancher 安装 Longhorn
+## 使用 Rancher 安装 Longhorn
1. 满足所有[安装要求](https://longhorn.io/docs/latest/deploy/install/#installation-requirements)。
1. 转到要安装 Longhorn 的集群。
@@ -37,14 +37,14 @@ Longhorn 是免费的开源软件。它最初由 Rancher Labs 开发,现在被
**结果**:Longhorn 已部署到 Kubernetes 集群中。
-### 从 Rancher UI 访问 Longhorn
+## 从 Rancher UI 访问 Longhorn
1. 转到安装了 Longhorn 的集群。在左侧导航菜单中,单击 **Longhorn**。
1. 在此页面上,你可以编辑 Longhorn 管理的 Kubernetes 资源。要查看 Longhorn UI,请单击**概述**中的 **Longhorn** 按钮。
**结果**:你将转到 Longhorn UI,在这里你可以管理 Kubernetes 集群中的 Longhorn 卷及其副本,以及可能存在于另一个 Kubernetes 集群或 S3 中的 Longhorn 存储辅助备份。
-### 从 Rancher UI 卸载 Longhorn
+## 从 Rancher UI 卸载 Longhorn
1. 转到安装了 Longhorn 的集群,然后单击 **Apps**。
1. 点击**已安装的应用**。
@@ -53,15 +53,15 @@ Longhorn 是免费的开源软件。它最初由 Rancher Labs 开发,现在被
**结果**:Longhorn 已被卸载。
-### GitHub 仓库
+## GitHub 仓库
Longhorn 项目可在[此处](https://github.com/longhorn/longhorn)获取。
-### 文档
+## 文档
Longhorn 文档在[此处](https://longhorn.io/docs/)。
-### 架构
+## 架构
Longhorn 为每个卷创建专用的存储控制器,并在多个节点上存储的多个副本之间同步复制该卷。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
index cebb2e4323f..b181ee890cb 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md
@@ -15,7 +15,7 @@ description: Prometheus 允许你查看来自不同 Rancher 和 Kubernetes 对
使用 `rancher-monitoring` 应用程序,你可以快速部署领先的开源监控和告警解决方案到你的集群上。
-### 功能
+## 功能
Prometheus 支持查看 Rancher 和 Kubernetes 对象的指标。通过使用时间戳,Prometheus 能让你通过 Rancher UI 或 Grafana(与 Prometheus 一起部署的分析查看平台)以更容易阅读的图表和视觉形式来查询和查看这些指标。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
index dfeefda6f97..d068c7c77fa 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md
@@ -107,7 +107,7 @@ Monitoring 还会创建其他 `ClusterRole`,这些角色默认情况下不会
| 角色 | 用途 |
| ------------------------------| ---------------------------|
-| monitoring-ui-view | _自 Monitoring v2 14.5.100+ 起可用_ 此 ClusterRole 允许用户在 Rancher UI 中查看指定集群的指标图。这是通过授予对外部监控 UI 的只读访问权限来实现的。具有此角色的用户有权限列出 Prometheus、Alertmanager 和 Grafana 端点,并通过 Rancher 代理向 Prometheus、Grafana 和 Alertmanager UI 发出 GET 请求。 |
+| monitoring-ui-view | _自 Monitoring v2 14.5.100+ 起可用_ 此 ClusterRole 允许用户在 Rancher UI 中查看指定集群的指标图。这是通过授予对外部监控 UI 的只读访问权限来实现的。具有此角色的用户有权限列出 Prometheus、Alertmanager 和 Grafana 端点,并通过 Rancher 代理向 Prometheus、Grafana 和 Alertmanager UI 发出 GET 请求。 |
### 使用 kubectl 分配 Role 和 ClusterRole
@@ -203,7 +203,7 @@ Rancher 部署的默认角色(即 cluster-owner、cluster-member、project-own
| Rancher 角色 | Kubernetes ClusterRole | 可用 Rancher 版本 | 可用 Monitoring V2 版本 |
|--------------------------|-------------------------------|-------|------|
-| 查看 Monitoring\* | [monitoring-ui-view](#monitoring-ui-view) | 2.4.8+ | 9.4.204+ |
+| 查看 Monitoring\* | [monitoring-ui-view](#其他监控集群角色) | 2.4.8+ | 9.4.204+ |
\* 如果某个用户绑定了 Rancher 的 **View Monitoring** 角色,该用户只有在有 UI 链接时才有权访问外部 Monitoring UI。要访问 Monitoring Pane 以获取这些链接,用户必须是至少一个项目的项目成员。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector.md
index 42a2e900c59..f14ae63cbb8 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector.md
@@ -2,13 +2,13 @@
title: NeuVector 集成
---
-### Rancher 中的 NeuVector 集成
+## Rancher 中的 NeuVector 集成
-[NeuVector 5.x](https://open-docs.neuvector.com/) 是一个开源的,以容器为中心的安全应用程序,Rancher 已集成 NeuVector。NeuVector 在运行时为关键应用程序和数据提供实时的合规、可见和保护功能。NeuVector 提供具有 CIS Benchmark 和漏洞扫描的防火墙、容器进程/文件系统监控和安全审计。有关 Rancher 安全性的更多信息,请参阅[安全文档](../pages-for-subheaders/rancher-security.md)。
+[NeuVector 5.x](https://open-docs.neuvector.com/) 是一个开源的,以容器为中心的安全应用程序,Rancher 已集成 NeuVector。NeuVector 在运行时为关键应用程序和数据提供实时的合规、可见和保护功能。NeuVector 提供具有 CIS Benchmark 和漏洞扫描的防火墙、容器进程/文件系统监控和安全审计。有关 Rancher 安全性的更多信息,请参阅[安全文档](../reference-guides/rancher-security/rancher-security.md)。
NeuVector 可以通过 Helm Chart 启用。你可以在 **Apps** 或 Rancher UI 中的 **Cluster Tools** 中安装该 Chart。安装 Helm Chart 后,用户可以轻松地[在 Rancher 中部署和管理 NeuVector 集群](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace)。
-### 使用 Rancher 安装 NeuVector
+## 使用 Rancher 安装 NeuVector
Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可以在 Rancher 中直接跳转,然后部署和管理 NeuVector 集群。
@@ -40,12 +40,12 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 点击左侧导航栏底部的**集群工具**。
1. 按照上面的步骤 4 相应地选择你的容器运行时,然后再次单击**安装**。
-### 从 Rancher UI 访问 NeuVector
+## 从 Rancher UI 访问 NeuVector
1. 导航到安装了 NeuVector 的集群的 Cluster Explorer。在左侧导航栏中,单击 **NeuVector**。
1. 单击外部链接以转到 NeuVector UI。选择链接后,用户必须接受`最终用户许可协议`才能访问 NeuVector UI。
-### 从 Rancher UI 卸载 NeuVector
+## 从 Rancher UI 卸载 NeuVector
**通过 "Apps" 卸载**:
@@ -58,15 +58,15 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 点击 **☰ > 集群管理**。
1. 单击屏幕左下角的**集群工具**,然后单击 NeuVector Chart 下方的垃圾桶图标。如果需要,选择`删除与此应用关联的 CRD`,然后单击**删除**。
-### GitHub 仓库
+## GitHub 仓库
NeuVector 项目在[这里](https://github.com/neuvector/neuvector)。
-### 文档
+## 文档
NeuVector 文档在[这里](https://open-docs.neuvector.com/)。
-### 架构
+## 架构
NeuVector 安全解决方案包含四种类型的安全容器,分别是 Controller、Enforcer、Manager 和 Scanner。它还提供了一个称为 All-in-One 的特殊容器(主要用于 Docker 原生部署),能将 Controller、Enforcer 和 Manager 功能组合在一个容器中。此外,还有一个 Updater,运行该程序时会更新 CVE 数据库。
@@ -87,7 +87,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
要了解有关 NeuVector 架构的更多信息,请参阅[此处](https://open-docs.neuvector.com/basics/overview#architecture)。
-### CPU 和内存分配
+## CPU 和内存分配
以下是默认 NeuVector Chart 安装部署的最低计算资源推荐。请注意,未设置资源限制。
@@ -101,7 +101,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
\* Controller、Manager 和 Scanner 容器合计至少需要 1GB 内存。
-### 强化集群支持 - Calico 和 Canal
+## 强化集群支持 - Calico 和 Canal
@@ -158,7 +158,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
-### 启用 SELinux 的集群支持 - Calico 和 Canal
+## 启用 SELinux 的集群支持 - Calico 和 Canal
要在 RKE2 集群上启用 SELinux,请执行以下步骤:
@@ -175,12 +175,12 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{
kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}'
```
-### 离线环境中的集群支持
+## 离线环境中的集群支持
- 所有 NeuVector 组件都可部署在离线环境中的集群上,无需任何额外配置。
-### 支持限制
+## 支持限制
* 目前仅支持管理员和集群所有者。
@@ -189,7 +189,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
* Windows 集群不支持 NeuVector。
-### 其他限制
+## 其他限制
* 目前,如果 NeuVector partner Chart 已存在,则 NeuVector 功能 Chart 的安装会失败。要解决此问题,请卸载 NeuVector partner Chart 并重新安装 NeuVector 功能 Chart。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector/overview.md
index 2a23c695335..b4ee0d1e6ef 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector/overview.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/integrations-in-rancher/neuvector/overview.md
@@ -6,13 +6,13 @@ title: 概述
-### Rancher 中的 NeuVector 集成
+## Rancher 中的 NeuVector 集成
[NeuVector 5.x](https://open-docs.neuvector.com/) 是一个开源的,以容器为中心的安全应用程序,Rancher 已集成 NeuVector。NeuVector 在运行时为关键应用程序和数据提供实时的合规、可见和保护功能。NeuVector 提供具有 CIS Benchmark 和漏洞扫描的防火墙、容器进程/文件系统监控和安全审计。有关 Rancher 安全性的更多信息,请参阅[安全文档](../../reference-guides/rancher-security)。
NeuVector 可以通过 Helm Chart 启用。你可以在 **Apps** 或 Rancher UI 中的 **Cluster Tools** 中安装该 Chart。安装 Helm Chart 后,用户可以轻松地[在 Rancher 中部署和管理 NeuVector 集群](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace)。
-### 使用 Rancher 安装 NeuVector
+## 使用 Rancher 安装 NeuVector
Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可以在 Rancher 中直接跳转,然后部署和管理 NeuVector 集群。
@@ -44,12 +44,12 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 点击左侧导航栏底部的**集群工具**。
1. 按照上面的步骤 4 相应地选择你的容器运行时,然后再次单击**安装**。
-### 从 Rancher UI 访问 NeuVector
+## 从 Rancher UI 访问 NeuVector
1. 导航到安装了 NeuVector 的集群的 Cluster Explorer。在左侧导航栏中,单击 **NeuVector**。
1. 单击外部链接以转到 NeuVector UI。选择链接后,用户必须接受`最终用户许可协议`才能访问 NeuVector UI。
-### 从 Rancher UI 卸载 NeuVector
+## 从 Rancher UI 卸载 NeuVector
**通过 Apps 卸载:**
@@ -62,15 +62,15 @@ Harvester Helm Chart 用于管理 Rancher 中 NeuVector UI 的访问,用户可
1. 单击 **☰ > 集群管理**。
1. 单击屏幕左下角的**集群工具**,然后单击 NeuVector Chart 下方的垃圾桶图标。如果需要,选择`删除与此应用关联的 CRD`,然后单击**删除**。
-### GitHub 仓库
+## GitHub 仓库
NeuVector 项目在[这里](https://github.com/neuvector/neuvector)。
-### 文档
+## 文档
NeuVector 文档在[这里](https://open-docs.neuvector.com/)。
-### 架构
+## 架构
NeuVector 安全解决方案包含四种类型的安全容器,分别是 Controller、Enforcer、Manager 和 Scanner。它还提供了一个称为 All-in-One 的特殊容器(主要用于 Docker 原生部署),能将 Controller、Enforcer 和 Manager 功能组合在一个容器中。此外,还有一个 Updater,运行该程序时会更新 CVE 数据库。
@@ -91,7 +91,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
要了解有关 NeuVector 架构的更多信息,请参阅[此处](https://open-docs.neuvector.com/basics/overview#architecture)。
-### CPU 和内存分配
+## CPU 和内存分配
以下是默认 NeuVector Chart 安装部署的最低计算资源推荐。请注意,未设置资源限制。
@@ -104,7 +104,7 @@ NeuVector 安全解决方案包含四种类型的安全容器,分别是 Contro
\* Controller、Manager 和 Scanner 容器合计至少需要 1GB 内存。
-### 强化集群支持 - Calico 和 Canal
+## 强化集群支持 - Calico 和 Canal
@@ -159,7 +159,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
-### 启用 SELinux 的集群支持 - Calico 和 Canal
+## 启用 SELinux 的集群支持 - Calico 和 Canal
要在 RKE2 集群上启用 SELinux,请执行以下步骤:
@@ -175,11 +175,11 @@ kubectl patch deploy neuvector-scanner-pod -n cattle-neuvector-system --patch '{
kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"securityContext":{"runAsUser": 5400}}}}}}}'
```
-### 离线环境中的集群支持
+## 离线环境中的集群支持
- 所有 NeuVector 组件都可部署在离线环境中的集群上,无需任何额外配置。
-### 支持限制
+## 支持限制
- 目前仅支持管理员和集群所有者。
@@ -187,7 +187,7 @@ kubectl patch cronjob neuvector-updater-pod -n cattle-neuvector-system --patch '
- Windows 集群不支持 NeuVector。
-### 其他限制
+## 其他限制
- 目前,如果 NeuVector partner Chart 已存在,则 NeuVector 功能 Chart 的安装会失败。要解决此问题,请卸载 NeuVector partner Chart 并重新安装 NeuVector 功能 Chart。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-provisioning-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-provisioning-drivers.md
deleted file mode 100644
index 65868b38bd3..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-provisioning-drivers.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: 配置驱动
----
-
-使用 Rancher 中的驱动,你可以管理可以使用哪些供应商来部署[托管的 Kubernetes 集群](set-up-clusters-from-hosted-kubernetes-providers.md)或[云服务器节点](use-new-nodes-in-an-infra-provider.md),以允许 Rancher 部署和管理 Kubernetes。
-
-### Rancher 驱动
-
-你可以启用或禁用 Rancher 中内置的驱动。如果相关驱动 Rancher 尚未实现,你可以添加自己的驱动。
-
-Rancher 中有两种类型的驱动:
-
-* [集群驱动](#集群驱动)
-* [主机驱动](#主机驱动)
-
-### 集群驱动
-
-集群驱动用于配置[托管的 Kubernetes 集群](set-up-clusters-from-hosted-kubernetes-providers.md),例如 GKE、EKS、AKS 等。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将显示为为托管 Kubernetes 集群创建集群的选项。默认情况下,Rancher 与几个现有的集群驱动打包在一起,但你也可以创建自定义集群驱动并添加到 Rancher。
-
-默认情况下,Rancher 已激活多个托管 Kubernetes 云提供商,包括:
-
-* [Amazon EKS](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/eks.md)
-* [Google GKE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md)
-* [Azure AKS](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md)
-
-还有几个托管的 Kubernetes 云提供商是默认禁用的,但也打包在 Rancher 中:
-
-* [Alibaba ACK](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md)
-* [Huawei CCE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md)
-* [Tencent](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md)
-
-### 主机驱动
-
-主机驱动用于配置主机,Rancher 使用这些主机启动和管理 Kubernetes 集群。主机驱动与 [Docker Machine 驱动](https://docs.docker.com/machine/drivers/)相同。创建主机模板时可以显示的主机驱动,是由主机驱动的状态定义的。只有 `active` 主机驱动将显示为创建节点模板的选项。默认情况下,Rancher 与许多现有的 Docker Machine 驱动打包在一起,但你也可以创建自定义主机驱动并添加到 Rancher。
-
-如果你不想向用户显示特定的主机驱动,则需要停用这些主机驱动。
-
-Rancher 支持几家主要的云提供商,但默认情况下,这些主机驱动处于 active 状态并可供部署:
-
-* [Amazon EC2](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)
-* [Azure](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md)
-* [Digital Ocean](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md)
-* [vSphere](vsphere.md)
-
-还有其他几个默认禁用的主机驱动,但打包在 Rancher 中:
-
-* [Harvester](../integrations-in-rancher/harvester.md#harvester-主机驱动) - 在 Rancher 2.6.1 中可用
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/advanced-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/advanced-configuration.md
deleted file mode 100644
index cf942d3dac5..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/advanced-configuration.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: 高级配置
----
-
-### Alertmanager
-
-有关配置 Alertmanager 自定义资源的信息,请参阅[此页面](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md)。
-
-### Prometheus
-
-有关配置 Prometheus 自定义资源的信息,请参阅[此页面](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md)。
-
-### PrometheusRules
-
-有关配置 Prometheus 自定义资源的信息,请参阅[此页面](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/air-gapped-helm-cli-install.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/air-gapped-helm-cli-install.md
deleted file mode 100644
index b5b58f77e56..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/air-gapped-helm-cli-install.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: 离线 Helm CLI 安装
----
-
-本文介绍如何使用 Helm CLI 在离线环境中安装 Rancher Server。离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-
-Rancher 安装在 RKE Kubernetes 集群、K3s Kubernetes 集群,或单个 Docker 容器上对应的安装步骤会有所不同。
-
-如需了解各个安装方式的更多信息,请参见[本页](installation-and-upgrade.md)。
-
-在安装指导中,我们为不同的安装选项提供对应的 _选项卡_ 。
-
-:::note 重要提示:
-
-如果你按照 Docker 安装指南安装 Rancher,你将没有把 Docker 安装转换为 Kubernetes 安装的升级途径。
-
-:::
-
-## 安装概要
-
-1. [设置基础设施和私有镜像仓库](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md)
-2. [收集镜像到私有镜像仓库](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md)
-3. [设置 Kubernetes 集群(如果你使用 Docker 安装,请跳过此步骤)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md)
-4. [安装 Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md)
-
-## 升级
-
-如需在离线环境中使用 Helm CLI 升级 Rancher,请按照[升级步骤](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md)进行操作。
-
-### 后续操作
-[准备节点](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/authentication-config.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/authentication-config.md
deleted file mode 100644
index a77cbe0b204..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/authentication-config.md
+++ /dev/null
@@ -1,132 +0,0 @@
----
-title: 身份验证配置
-weight: 10
----
-
-Rancher 向 Kubernetes 添加的关键功能之一,就是集中式用户身份验证。此功能允许你的用户使用一组凭证对你的所有 Kubernetes 集群进行身份验证。
-
-这种集中式的用户身份验证是使用 Rancher 身份验证代理完成的,该代理与 Rancher 的其他组件一起安装。这个代理验证你的用户,并使用一个 ServiceAccount 将用户请求转发到你的 Kubernetes 集群。
-
-## 外部验证与本地验证
-
-Rancher 身份验证代理支持与以下外部身份验证服务集成:
-
-| 验证服务 |
-| ------------------------------------------------------------------------------------------------ |
-| [Microsoft Active Directory](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md) |
-| [GitHub](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md) |
-| [Microsoft Azure AD](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md) |
-| [FreeIPA](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md) |
-| [OpenLDAP](configure-openldap.md) |
-| [Microsoft AD FS](configure-microsoft-ad-federation-service-saml.md) |
-| [PingIdentity](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-pingidentity.md) |
-| [Keycloak (OIDC)](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-oidc.md) |
-| [Keycloak (SAML)](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-saml.md) |
-| [Okta](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md) |
-| [Google OAuth](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-google-oauth.md) |
-| [Shibboleth](configure-shibboleth-saml.md) |
-
-同时,Rancher 也提供了[本地身份验证](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/create-local-users.md)。
-
-大多数情况下,应该使用外部身份验证服务,而不是本地身份验证,因为外部身份验证允许对用户进行集中管理。但是你可能需要一些本地身份验证用户,以便在特定的情况下(例如在外部身份验证系统不可用或正在进行维护时)管理 Rancher。
-
-## 用户和组
-
-Rancher 依赖用户和组来决定允许登录到 Rancher 的用户,以及他们可以访问哪些资源。使用外部系统进行身份验证时,将由外部系统提供用户和组。这些用户和组被赋予集群、项目、多集群应用、全局 DNS 提供商等资源的特定角色。当你将访问权限授予某个组时,身份验证提供程序中属于该组的所有用户都将能够使用你指定的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](manage-role-based-access-control-rbac.md)。
-
-:::note
-
-本地认证不支持创建或管理用户组。
-
-:::
-
-详情请参见[用户和组](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md)。
-
-## Rancher 授权范围
-
-将 Rancher 配置成允许使用外部验证提供程序登录后,你需要配置允许登录和使用 Rancher 的用户。可用选项如下:
-
-| 访问级别 | 描述 |
-|----------------------------------------------|-------------|
-| 允许任何有效用户 | 授权服务中的 _任何_ 用户都可以访问Rancher。通常不建议使用此设置。 |
-| 允许集群和项目成员,以及授权的用户和组织 | 认证服务中的任何用户,以及添加为**集群成员**或**项目成员**的任何组都可以登录到 Rancher。此外,添加到**授权用户和组织**列表中的身份验证服务中的任何用户和组都能登录到 Rancher。 |
-| 仅允许授权用户和组织 | 只有添加到**授权用户和组织**的身份验证服务中的用户和组能登录 Rancher。 |
-
-要在授权服务中为用户设置 Rancher 访问级别,请执行以下步骤:
-
-1. 在左上角,单击 **☰ > 用户 & 认证**。
-1. 单击左侧导航栏的**认证**。
-1. 设置好认证提供程序的配置后,使用 **Site Access** 选项来配置用户的授权范围。上表说明了每个选项的访问级别。
-1. 可选:如果你选择**允许任何有效用户**以外的选项,你可以通过在显示的文本字段中搜索用户,将用户添加到**授权用户和组织**的列表中。
-1. 单击**保存**。
-
-**结果**:Rancher 访问配置已应用。
-
-:::note SAML 身份提供商注意事项
-
-- SAML 协议不支持搜索或查找用户或组。因此,将用户或组添加到 Rancher 时不会对其进行验证。
-- 添加用户时,必须正确输入确切的用户 ID(即 `UID` 字段)。键入用户 ID 时,将不会搜索可能匹配的其他用户 ID。
-- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
-- 用户组下拉列表仅显示你所属的用户组。如果你不是某个组的成员,你将无法添加该组。
-
-:::
-
-## 外部身份验证配置和用户主体
-
-配置外部认证需要:
-
-- 分配了管理员角色的本地用户,以下称为 _本地主体_。
-- 可以使用外部认证服务进行认证的外部用户,以下称为 _外部主体_。
-
-外部身份验证的配置将影响 Rancher 中主体用户的管理方式。按照下面的列表来更好地理解这些影响。
-
-1. 作为本地主体登录到 Rancher 并完成外部身份验证的配置。
-
- 
-
-2. Rancher 将外部主体与本地主体相关联。这两个用户共享本地主体的用户 ID。
-
- 
-
-3. 完成配置后,Rancher 将自动退出本地主体。
-
- 
-
-4. 然后,Rancher 会自动将你作为外部主体重新登录。
-
- 
-
-5. 由于外部主体与本地主体共享一个 ID,因此**用户**页面不会再单独显示外部主体的对象。
-
- 
-
-6. 外部主体和本地主体共享相同的访问权限。
-
-:::note 重新配置以前设置的身份验证提供程序
-
-如果你需要重新配置或禁用以前设置的提供程序然后再重新启用它,请确保进行此操作的用户使用外部用户身份登录 Rancher,而不是本地管理员。
-
-:::
-
-## 禁用认证提供程序
-
-禁用身份认证提供程序时,Rancher 会删除与其关联的所有资源,例如:
-- Secrets
-- 全局角色绑定。
-- 集群角色模板绑定。
-- 项目角色模板绑定。
-- 与提供商关联的外部用户,但是这些用户从未以本地用户身份登录到 Rancher。
-
-由于此操作可能会导致许多资源丢失,因此你可能希望在提供程序上添加保护措施。为确保在禁用身份认证提供程序时不会运行此清理,请向相应的身份认证配置添加特殊注释。
-
-例如,要为 Azure AD 提供程序添加安全措施,请注释 `azuread` authconfig 对象:
-
-`kubectl annotate --overwrite authconfig azuread management.cattle.io/auth-provider-cleanup='user-locked'`
-
-在你将注释设置为 `unlocked` 之前,Rancher 不会执行清理。
-
-### 手动运行资源清理
-
-即使在你配置了另一个身份认证提供程序,Rancher 也可能会保留 local 集群中已禁用的身份认证提供程序配置的资源。例如,如果你使用 Provider A,然后禁用了它并开始使用 Provider B,当你升级到新版本的 Rancher 时,你可以手动触发对 Provider A 配置的资源的清理。
-
-要为已禁用的身份认证提供程序手动触发清理,请将带有 `unlocked` 值的 `management.cattle.io/auth-provider-cleanup` 注释添加到 auth 配置中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/authentication-permissions-and-global-configuration.md
deleted file mode 100644
index 88ed5f38751..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/authentication-permissions-and-global-configuration.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-title: 身份验证、权限和全局设置
----
-
-安装完成后,[系统管理员](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)需要配置 Rancher 来配置身份验证,安全,默认设定,安全策略,驱动和全局 DNS 条目。
-
-## 首次登录
-
-首次登录 Rancher 后,Rancher 会提示你输入 **Rancher Server URL**。你需要将 URL 设置为 Rancher Server 的主要入口点。当负载均衡器位于 Rancher Server 集群前面时,URL 需要设置为负载均衡器地址。系统会自动尝试从运行 Rancher Server 的主机的 IP 地址或主机名推断 Rancher Server 的URL,上述推断仅在你运行单节点 Rancher Server 时才正确。因此,在大多数情况下,你需要自己将 Rancher Server 的 URL 设置为正确的值。
-
-:::danger
-
-Rancher Server 的 URL 在设置后不可再更新。因此,你需要谨慎设置该 URL。
-
-:::
-
-## 身份验证
-
-Rancher 向 Kubernetes 添加的关键功能之一,就是集中式用户身份验证。此功能允许将本地用户连接到外部身份验证系统,使用该系统的用户和组进行身份验证。
-
-有关身份验证如何工作及如何设置外部身份认证系统,请参见[身份验证](authentication-config.md)。
-
-## 授权
-
-Rancher 通过 _用户_ 进行授权管理。用户的 _授权_ 或系统访问权限由用户角色决定。Rancher 提供了预设角色,让你轻松配置用户对资源的权限,还提供了为每个 Kubernetes 资源定制角色的能力。
-
-有关授权如何工作及如何自定义角色,请参见 [RBAC](manage-role-based-access-control-rbac.md)。
-
-## Pod 安全策略
-
-_Pod 安全策略(PSP)_ 是用来控制安全敏感相关 Pod 规范(例如 root 特权)的对象。如果某个 Pod 不满足 PSP 指定的条件,Kubernetes 将不允许它启动,并在 Rancher 中显示错误消息。
-
-有关如何创建和使用 PSP,请参见 [Pod 安全策略](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md)。
-
-## 配置驱动
-
-使用 Rancher 中的驱动,你可以管理可以使用哪些供应商来配置[托管的 Kubernetes 集群](set-up-clusters-from-hosted-kubernetes-providers.md)或[云服务器节点](use-new-nodes-in-an-infra-provider.md),以允许 Rancher 部署和管理 Kubernetes。
-
-详情请参考[配置驱动](about-provisioning-drivers.md)。
-
-## 添加 Kubernetes 版本到 Rancher
-
-你可以通过这个功能,在不升级 Rancher 的情况下,升级到最新发布的 Kubernetes 版本。Kubernetes 倾向于在次要版本删除或新增 API 接口。本功能让你轻松升级 Kubernetes 补丁版本(即 `v1.15.X`),但不升级 Kubernetes 次要版本(即 `v1.X.0`)。
-
-Rancher 用于配置 [RKE 集群](launch-kubernetes-with-rancher.md) 的信息现在位于 Rancher Kubernetes 元数据中。有关元数据配置以及如何更改用于配置 RKE 集群的 Kubernetes 版本,请参见 [Rancher Kubernetes 元数据。](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md)
-
-Rancher 用于配置 [RKE 集群](launch-kubernetes-with-rancher.md)的 Kubernetes 版本信息包含在 Rancher Kubernetes 元数据中。
-
-有关元数据如何工作以及如何配置元数据,请参见 [Rancher Kubernetes 元数据](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md)。
-
-## 全局设置
-
-顶部导航栏中提供了控制全局级别 Rancher 设置的选项。
-
-点击左上角的 **☰**,然后选择**全局设置**来查看并进行配置:
-
-- **设置**:各种 Rancher 默认值,例如用户密码的最小长度 (`password-min-length`)。需要小心修改这些设置,因为无效的值可能会破坏 Rancher 安装。
-- **功能开关**:打开或关闭的 Rancher 功能。其中一些是[实验功能](#启用实验功能)。
-- **横幅**:可以添加到门户上固定位置的元素。例如,你可以使用这些选项在用户登录 Rancher 时[设置自定义横幅](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md#固定横幅)。
-- **品牌**:可以[自定义](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md)的 Rancher UI 设计元素。你可以添加自定义徽标或图标,并修改 UI 颜色。
-- **性能**:Rancher UI 的性能设置,例如增量资源加载。
-- **主页链接**:Rancher UI **主页**上显示的链接。你可以修改默认链接的可见性或添加你自己的链接。
-
-### 启用实验功能
-
-Rancher 包含一些实验性或默认禁用的功能。你可以使用功能开关来启用这些功能。详情请参见[功能开关](enable-experimental-features.md)的章节。
-
-### 全局设置
-
-除非你激活了**旧版**[功能开关](enable-experimental-features.md),否则**全局配置**选项不可见。v2.6 及更高版本的 Rancher 默认禁用 **legacy** 标志。如果你从旧 Rancher 版本升级,或者在 Rancher v2.6 及更高版本上激活了 **legacy** 功能开关,则可以从顶部导航菜单访问**全局设置**:
-
-1. 点击左上角的 **☰**。
-1. 从**旧版应用**中选择**全局设置**。
-
-**全局设置**提供了以下功能:
-
-- **应用商店**
-- **全局 DNS 条目**
-- **全局 DNS 提供商**
-
-由于这些是旧版功能,因此请参阅有关[应用商店](/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md)、[全局 DNS 条目](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry)和[全局 DNS 提供商](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider)的 Rancher v2.0-v2.4 文档了解更多详情。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/aws-cloud-marketplace.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/aws-cloud-marketplace.md
deleted file mode 100644
index 980a427cf4d..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/aws-cloud-marketplace.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: AWS Marketplace 集成
----
-
-## 概述
-
-Rancher 提供了与 AWS Marketplace 的集成,允许用户向 SUSE 购买支持。此集成帮助你在需要使用更多集群时轻松调整支持需求。
-
-## 限制
-
-- 必须使用 Rancher v2.6.7 或更高版本。
-- Rancher 必须在启用其他指标的情况下进行部署。
-- Rancher 必须安装在 EKS 集群上。
-- 必须通过 AWS Marketplace 购买至少一项 Rancher 支持的 Entitlement。
-- 你可能需要额外的设置来支持代理/离线用例。有关详细信息,请参阅[先决条件](../integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md)。
-
-## 如何使用
-
-1. 完成[先决条件步骤](../integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/adapter-requirements.md)。
-2. [安装 CSP Adapter](../integrations-in-rancher/cloud-marketplace/aws-cloud-marketplace/install-adapter.md)。
-
-## 常见问题
-
-**我以后后续再购买更多节点的支持吗?**
-
-是的。你需要转到最初购买支持的 AWS Marketplace 条目并增加 Entitlement 的数量。
-
-**我可以在同一个 AWS 账户中使用多个 Rancher 实例吗?**
-
-是的。但是,安装 Rancher 的每个集群都需要遵守先决条件。
-
-此外,一个 Entitlement 每次只能由一台 Rancher management server 使用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/backup-restore-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/backup-restore-configuration.md
deleted file mode 100644
index b016cbe2f97..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/backup-restore-configuration.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Rancher 备份配置参考
----
-
-- [备份配置](../reference-guides/backup-restore-configuration/backup-configuration.md)
-- [还原配置](../reference-guides/backup-restore-configuration/restore-configuration.md)
-- [存储位置配置](../reference-guides/backup-restore-configuration/storage-configuration.md)
-- [Backup 和 Restore 自定义资源示例](../reference-guides/backup-restore-configuration/examples.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cis-scan-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cis-scan-guides.md
deleted file mode 100644
index 269edaaec4a..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cis-scan-guides.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: CIS 扫描指南
----
-
-- [安装 rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md)
-- [卸载 rancher-cis-benchmark](../how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md)
-- [运行扫描](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md)
-- [定期运行扫描](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md)
-- [跳过测试](../how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md)
-- [查看报告](../how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md)
-- [为 rancher-cis-benchmark 启用告警](../how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md)
-- [为定时扫描配置告警](../how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md)
-- [创建要运行的自定义 Benchmark 版本](../how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cli-with-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cli-with-rancher.md
deleted file mode 100644
index f1e68727a8d..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cli-with-rancher.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: Rancher CLI
----
-
-Rancher CLI 是一个命令行工具,用于在工作站中与 Rancher 进行交互。以下文档将描述 [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) 和 [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cloud-marketplace.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cloud-marketplace.md
deleted file mode 100644
index 1ef64aa10a8..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cloud-marketplace.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: 云市场集成
----
-
-Rancher 提供与云市场的集成,让你能轻松购买云提供商上的安装支持。此外,该集成还支持生成 supportconfig bundle,你可以将该 bundle 提供给 Rancher。
-
-此集成仅支持 AWS。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cluster-configuration.md
deleted file mode 100644
index 68908371deb..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/cluster-configuration.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: 集群配置
----
-
-使用 Rancher 配置 Kubernetes 集群后,你仍然可以编辑集群的选项和设置。
-
-有关编辑集群成员资格的信息,请转至[此页面](../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md)。
-
-### 集群配置参考
-
-集群配置选项取决于 Kubernetes 集群的类型:
-
-- [RKE 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)
-- [RKE2 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)
-- [K3s 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md)
-- [EKS 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md)
-- [GKE 集群配置](gke-cluster-configuration.md)
-- [AKS 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md)
-
-### 不同类型集群的管理功能
-
-对于已有集群而言,可提供的选项和设置取决于你配置集群的方法。
-
-下表总结了每一种类型的集群和对应的可编辑的选项和设置:
-
-import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md';
-
-
-
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/custom-resource-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/custom-resource-configuration.md
deleted file mode 100644
index 602ae0bfb63..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/custom-resource-configuration.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: 自定义资源配置
----
-
-以下自定义资源定义(Custom Resource Definition,CRD)用于配置 Logging:
-
-- [Flow 和 ClusterFlow](../integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md)
-- [Output 和 ClusterOutput](../integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-apps-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-apps-across-clusters.md
deleted file mode 100644
index 40e099ac09a..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-apps-across-clusters.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: 跨集群部署应用
----
-
-
-Rancher 2.5 引入了 Fleet,这是一种跨集群部署应用的新方式。
-
-使用 Fleet 的持续交付是大规模的 GitOps。如需更多信息,请参阅 [Fleet](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md)。
-
-### 多集群应用
-
-在 2.5 之前的 Rancher 版本中,多集群应用功能用于跨集群部署应用。我们已弃用多集群应用功能,但你仍然可以在 Rancher 2.5 中使用该功能。
-
-详情请参阅[此文档](../how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-rancher-manager.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-rancher-manager.md
deleted file mode 100644
index c20dbce3552..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-rancher-manager.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: 部署 Rancher Server
----
-
-你可使用以下指南之一,在你选择的提供商中部署和配置 Rancher 和 Kubernetes 集群。
-
-- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md)(使用 Terraform)
-- [AWS Marketplace](../getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md)(使用 Amazon EKS)
-- [Azure](../getting-started/quick-start-guides/deploy-rancher-manager/azure.md)(使用 Terraform)
-- [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md)(使用 Terraform)
-- [GCP](../getting-started/quick-start-guides/deploy-rancher-manager/gcp.md)(使用 Terraform)
-- [Hetzner Cloud](../getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md)(使用 Terraform)
-- [Vagrant](../getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md)
-- [Equinix Metal](../getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md)
-- [Outscale](../getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md)(使用 Terraform)
-
-如有需要,你可以查看以下指南以了解分步步骤。如果你需要在其他提供商中或本地运行 Rancher,或者你只是想看看它是多么容易上手,你可阅读以下指南:
-
-- [手动安装](../getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-rancher-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-rancher-workloads.md
deleted file mode 100644
index 01d72560dde..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/deploy-rancher-workloads.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: 部署工作负载
----
-
-这些指南指导你完成一个应用的部署,包括如何将应用暴露在集群之外使用。
-
-- [部署带有 Ingress 的工作负载](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md)
-- [部署带有 NodePort 的工作负载](../getting-started/quick-start-guides/deploy-workloads/nodeports.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/downstream-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/downstream-cluster-configuration.md
deleted file mode 100644
index 3d09efe4a8e..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/downstream-cluster-configuration.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: 下游集群配置
----
-
-以下文档将讨论[节点模板配置](./node-template-configuration.md)和[主机配置](./machine-configuration.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/fleet-gitops-at-scale.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/fleet-gitops-at-scale.md
deleted file mode 100644
index c27fe57a2cd..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/fleet-gitops-at-scale.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: 使用 Feet 进行持续交付
----
-
-使用 Fleet 的持续交付是大规模的 GitOps。你可以使用 Fleet 管理多达一百万个集群。此外,它非常轻量,因此也非常适用于[单个集群](https://fleet.rancher.io/installation#default-install)。但是,它在[大规模](https://fleet.rancher.io/installation#configuration-for-multi-cluster)场景下的功能更加强大。大规模指的是大量集群、大量部署或大量团队。
-
-Fleet 是一个独立于 Rancher 的项目,你可以使用 Helm 将它安装在任何 Kubernetes 集群上。
-
-
-## 架构
-
-有关 Fleet 工作原理的信息,请参阅[此处](../integrations-in-rancher/fleet-gitops-at-scale/architecture.md)。
-
-## 在 Rancher UI 中访问 Fleet
-
-Fleet 预装在 Rancher 中,可以通过 Rancher UI 中的**持续交付**选项进行管理。有关持续交付和 Fleet 故障排除技巧的更多信息,请参阅[此处](https://fleet.rancher.io/troubleshooting)。
-
-用户可以通过遵循 **gitops** 的实践,利用持续交付将应用部署到 git 仓库中的 Kubernetes 集群,而无需任何手动操作。
-
-按照以下步骤在 Rancher UI 中访问持续交付:
-
-1. 单击 **☰ > 持续交付**。
-
-1. 在菜单顶部选择你的命名空间,注意以下几点:
-
- - 默认情况下会选中 **fleet-default**,其中包括注册到 Rancher 的所有下游集群。
-
- - 你可以切换到仅包含 **local** 集群的 **fleet-local**,或者创建自己的工作空间,并将集群分配和移动到该工作空间。
-
- - 然后,你可以单击左侧导航栏上的**集群**来管理集群。
-
-1. 单击左侧导航栏上的 **Git 仓库**将 git 仓库部署到当前工作空间中的集群中。
-
-1. 选择你的 [git 仓库](https://fleet.rancher.io/gitrepo-add)和[目标集群/集群组](https://fleet.rancher.io/gitrepo-targets)。你还可以单击左侧导航栏中的**集群组**在 UI 中创建集群组。
-
-1. 部署 git 仓库后,你可以通过 Rancher UI 监控应用。
-
-## Windows 支持
-
-有关对具有 Windows 节点的集群的支持,请参阅[此页面](../integrations-in-rancher/fleet-gitops-at-scale/windows-support.md)。
-
-## GitHub 仓库
-
-你可以单击此处获取 [Fleet Helm Chart](https://github.com/rancher/fleet/releases)。
-
-## 在代理后使用 Fleet
-
-有关在代理后使用 Fleet 的详细信息,请参阅[此页面](../integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md)。
-
-## Helm Chart 依赖
-
-由于用户需要完成依赖列表,因此为了成功部署具有依赖项的 Helm Chart,你必须手动运行命令(如下所列)。如果你不这样做,并继续克隆仓库并运行 `helm install`,由于依赖项将丢失,因此你的安装将失败。
-
-git 仓库中的 Helm Chart 必须在 Chart 子目录中包含其依赖项。你必须手动运行 `helm dependencies update $chart`,或在本地运行 `helm dependencies build $chart`,然后将完整的 Chart 目录提交到你的 git 仓库。请注意,你需要使用适当的参数来修改命令。
-
-## 故障排除
-
-- **已知问题**:Fleet git 仓库的 clientSecretName 和 helmSecretName 密文不包含在由 [backup-restore-operator](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md#1-安装-rancher-backup-operator) 创建的备份或恢复中。如果我们有了永久的解决方案,我们将通知社区。
-
-- **临时解决方法**:默认情况下,用户定义的密文不会在 Fleet 中备份。如果执行灾难恢复或将 Rancher 迁移到新集群,则需要重新创建密文。要修改 resourceSet 以包含需要备份的其他资源,请参阅[此文档](https://github.com/rancher/backup-restore-operator#user-flow)。
-
-## 文档
-
-Fleet 文档链接:https://fleet.rancher.io/
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/helm-charts-in-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/helm-charts-in-rancher.md
deleted file mode 100644
index 67223803e4d..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/helm-charts-in-rancher.md
+++ /dev/null
@@ -1,155 +0,0 @@
----
-title: Rancher 中的 Helm Chart
----
-
-在本节中,你将学习如何在 Rancher 中管理 Helm Chart 仓库和应用。你可以在 **Apps** 中管理 Helm Chart 仓库。它使用类似目录的系统从仓库中导入 Chart 包,然后使用这些 Chart 来部署自定义 Helm 应用或 Rancher 工具(例如监控和 Istio)。Rancher 工具以预加载仓库的方式提供,并能部署为独立的 Helm Chart 。其他仓库只会添加到当前集群。
-
-### 版本控制方案
-
-Rancher 功能 Chart 版本控制方案以 Chart 的主要版本和上游 Chart 的 `+up` 注释(如果适用)为中心。
-
-**主要版本**:Chart 的主要版本与 Rancher 次要版本相关联。当你升级到新的 Rancher 次要版本时,你应该确保你的所有 **Apps** Chart 也升级到 Chart 的正确发行版本。
-
-**功能 Chart**:
-
-| **Name** | **支持的最低版本** | **支持的最高版本** |
-| ---------------- | ------------ | ------------ |
-| external-ip-webhook | 100.0.0+up1.0.0 | 100.0.1+up1.0.1 |
-| harvester-cloud-provider | 100.0.2+up0.1.12 | 100.0.2+up0.1.12 |
-| harvester-csi-driver | 100.0.2+up0.1.11 | 100.0.2+up0.1.11 |
-| neuvector | 100.0.0+up2.2.0 | 100.0.0+up2.2.0 |
-| rancher-alerting-drivers | 100.0.0 | 100.0.2 |
-| rancher-backup | 2.0.1 | 2.1.2 |
-| rancher-cis-benchmark | 2.0.1 | 2.0.4 |
-| rancher-gatekeeper | 100.0.0+up3.6.0 | 100.1.0+up3.7.1 |
-| rancher-istio | 100.0.0+up1.10.4 | 100.3.0+up1.13.3 |
-| rancher-logging | 100.0.0+up3.12.0 | 100.1.2+up3.17.4 |
-| rancher-longhorn | 100.0.0+up1.1.2 | 100.1.2+up1.2.4 |
-| rancher-monitoring | 100.0.0+up16.6.0 | 100.1.2+up19.0.3 |
-| rancher-sriov (experimental) | 100.0.0+up0.1.0 | 100.0.3+up0.1.0 |
-| rancher-vsphere-cpi | 100.3.0+up1.2.1 | 100.3.0+up1.2.1 |
-| rancher-vsphere-csi | 100.3.0+up2.5.1-rancher1 | 100.3.0+up2.5.1-rancher1 |
-| rancher-wins-upgrader | 0.0.100 | 100.0.1+up0.0.1 |
-
-
-
-**基于上游的 Chart**:对于基于上游的 Chart ,+up 注释用于表示 Rancher Chart 正在跟踪的上游版本。在升级时,请检查上游版本与 Rancher 的兼容性。
-
-- 例如,用于 Monitoring 的 `100.x.x+up16.6.0` 跟踪上游 kube-prometheus-stack `16.6.0` 并添加了一些 Rancher 补丁。
-
-- 在升级时,请确保你没有降级你正在使用的 Chart 版本。例如,如果你在 Rancher 2.5 中使用 Monitoring > `16.6.0` 版本,则不应升级到 `100.x.x+up16.6.0`。相反,你应该在下一个发行版中升级到适当的版本。
-
-### 预发布版本
-
-预发布版本遵循 [Semantic Versioning 2.0.0](https://semver.org/) 定义的[规范](https://semver.org/#spec-item-9)。例如,版本为 `0.1.3-dev.12ab4f` 的 Helm chart 为预发布版本。默认情况下不显示预发布版本,必须进行配置才能显示。
-
-要显示预发布版本:
-
-1. 单击右上角的用户头像。
-1. 单击**偏好设置**。
-1. 在 **Helm Chart** 下,选择**包括预发布版本**。
-
-### Charts
-
-从左上角的菜单中选择 _Apps_,然后你会转到 Chart 页面。
-
-Chart 页面包含所有 Rancher、Partner 和自定义 Chart 。
-
-* Rancher 工具(例如 Logging 或 Monitoring)包含在 Rancher 标签下
-* Partner Chart 位于 Partner 标签下
-* 自定义 Chart 将显示在仓库的名称下
-
-所有这三种类型都以相同的方式部署和管理。
-
-:::note
-
-由 Cluster Manager (旧版 Rancher UI 中的全局视图)管理的应用应继续仅由 Cluster Manager 管理,而在新 UI 中使用 Apps 管理的应用则仅能由 Apps 管理。
-
-:::
-
-### 仓库
-
-从左侧边栏中选择 _仓库_。
-
-这些项目代表 helm 仓库,可以是具有 index.yaml 的传统 helm 端点,也可以是被克隆并指向特定分支的 git 仓库。要使用自定义 Chart ,只需在此处添加你的仓库即可,它们将在仓库名称下的 Chart 选项卡中可用。
-
-为 Helm Chart 仓库添加私有 CA:
-
-- **基于 HTTP 的 Chart 仓库**:你必须将 DER 格式的 CA 证书的 base64 编码副本添加到 Chart 仓库的 spec.caBundle 字段,例如 `openssl x509 -outform der -in ca.pem | base64 -w0`。点击 Chart 仓库的**编辑 YAML** 并进行设置,如下所示:
- ```
- [...]
- spec:
- caBundle:
- MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT
- ...
- nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4=
- [...]
- ```
-
-
-- **基于 Git 的 Chart 仓库**:你必须将 DER 格式的 CA 证书的 base64 编码副本添加到 Chart 仓库的 spec.caBundle 字段,例如 `openssl x509 -outform der -in ca.pem | base64 -w0`。点击 Chart 仓库的**编辑 YAML** 并进行设置,如下所示:
- ```
- [...]
- spec:
- caBundle:
- MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT
- ...
- nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4=
- [...]
- ```
-
-:::note
-
-带有身份验证的 Helm Chart 仓库
-
-Repo.Spec 包含一个 `disableSameOriginCheck` 值,该值允许用户绕过相同源的检查,将仓库身份认证信息作为基本 Auth 标头与所有 API 调用一起发送。不建议采用这种做法,但这可以用作非标准 Helm Chart 仓库(例如重定向到不同源 URL 的仓库)的临时解决方案。
-
-要将此功能用于现有 Helm Chart 仓库,请单击 ⋮ > 编辑 YAML 。在 YAML 文件的 `spec` 部分,添加 `disableSameOriginCheck` 并将其设置为 `true`:
-
-```yaml
-[...]
-spec:
- disableSameOriginCheck: true
-[...]
-```
-
-:::
-
-### Helm 兼容性
-
-仅支持 Helm 3 兼容 Chart 。
-
-
-### 部署和升级
-
-从 _Chart_ 选项卡中选择要安装的 Chart 。Rancher 和 Partner Chart 可能通过自定义页面或 questions.yaml 文件进行额外的配置,但所有 Chart 安装都可以修改 values.yaml 和其他基本设置。单击安装后,将部署一个 Helm 操作作业,并显示该作业的控制台。
-
-要查看所有最近的更改,请转到 _最近的操作_ 选项卡。你可以查看已进行的调用、条件、事件和日志。
-
-安装 Chart 后,你可以在 _已安装的应用_ 选项卡中找到该 Chart。在本节中,你可以升级或删除安装,并查看更多详细信息。选择升级时,呈现的形式和数值与安装相同。
-
-大多数 Rancher 工具在 _Apps_ 下方的工具栏中都有额外的页面,以帮助你管理和使用这些功能。这些页面包括指向仪表板的链接、可轻松添加自定义资源的表单以及其他信息。
-
-:::caution
-
-如果你使用 _在升级前自定义 Helm 选项_ 来升级 Chart,如果你的 Chart 有不可更改的字段,使用 _--force_ 选项可能会导致错误。这是因为 Kubernetes 中的某些对象一旦创建就无法更改。要避免该错误,你可以:
-
-* 使用默认升级选项(即不要使用 _--force_ 选项)
-* 卸载现有 Chart 并安装升级后的 Chart
-* 在执行 _--force_ 升级之前删除集群中具有不可更改字段的资源
-
-:::
-
-#### 旧版应用
-
-**Apps > Installed Apps** 页面中,旧版应用的升级按钮已被移除。
-
-如果你安装了旧版应用并想要升级它:
-
-- 必须开启旧版[功能开关](enable-experimental-features.md)(如果在升级前有旧版应用导致该开关未自动开启)
-- 你可以从 cluster explorer 升级应用,从左侧导航部分选择**旧版 > 项目 > 应用**
-- 对于多集群应用,你可以转到 **≡ > 多集群应用**并在那里升级应用
-
-### 限制
-
-Rancher CLI **不能**用于安装[仪表板应用程序或 Rancher 功能 Chart](helm-charts-in-rancher.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/infrastructure-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/infrastructure-setup.md
deleted file mode 100644
index 16b3b3d4d97..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/infrastructure-setup.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Kubernetes 集群基础设施
----
-
-要为具有外部数据库的高可用 K3s Kubernetes 集群设置基础设施,请参见[本页面](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md)。
-
-
-要为高可用 RKE Kubernetes 集群设置基础设施,请参见[本页面](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-references.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-references.md
deleted file mode 100644
index e7a5b5ac802..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-references.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: 安装参考
----
-
-有关其他安装资源,请参阅以下参考指南:[Rancher Helm Chart 选项](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md)、[TLS 设置](../getting-started/installation-and-upgrade/installation-references/tls-settings.md)和[功能开关](../getting-started/installation-and-upgrade/installation-references/feature-flags.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-requirements.md
deleted file mode 100644
index 0328552e0e8..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-requirements.md
+++ /dev/null
@@ -1,155 +0,0 @@
----
-title: 安装要求
-description: 如果 Rancher 配置在 Docker 或 Kubernetes 中运行时,了解运行 Rancher Server 的每个节点的节点要求
----
-
-本文描述了对需要安装 Rancher Server 的节点的软件、硬件和网络要求。Rancher Server 可以安装在单个节点或高可用的 Kubernetes 集群上。
-
-:::note 重要提示:
-
-如果你需要在 Kubernetes 集群上安装 Rancher,该节点的要求与用于运行应用和服务的[下游集群的节点要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md)不同。
-
-:::
-
-Rancher UI 在基于 Firefox 或 Chromium 的浏览器(Chrome、Edge、Opera、Brave)中效果最佳。
-
-查看我们的[最佳实践](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md)页面,获取在生产环境中运行 Rancher Server 的建议。
-
-## Kubernetes 与 Rancher 的兼容性
-
-Rancher 需要安装在支持的 Kubernetes 版本上。请查阅 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions),确保你的 Kubernetes 版本受支持。
-
-## 操作系统和容器运行时要求
-
-所有支持的操作系统都使用 64-bit x86 架构。Rancher 兼容当前所有的主流 Linux 发行版。
-
-[Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)列出了每个 Rancher 版本测试过的操作系统和 Docker 版本。
-
-运行 RKE 集群的节点需要安装 Docker。RKE2 或 K3s 集群不需要它。
-
-请安装 `ntp`(Network Time Protocol),以防止在客户端和服务器之间由于时间不同步造成的证书验证错误。
-
-某些 Linux 发行版的默认防火墙规则可能会阻止 Kubernetes 集群内的通信。从 Kubernetes v1.19 开始,你必须关闭 firewalld,因为它与 Kubernetes 网络插件冲突。
-
-如果你不太想这样做的话,你可以查看[相关问题](https://github.com/rancher/rancher/issues/28840)中的建议。某些用户已能成功[使用 ACCEPT 策略 为 Pod CIDR 创建一个独立的 firewalld 区域](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822)。
-
-如果你需要在 ARM64 上使用 Rancher,请参见[在 ARM64(实验功能)上运行 Rancher](../how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md)。
-
-### RKE 要求
-
-容器运行时方面,RKE 可以兼容当前的所有 Docker 版本。
-
-有关详细信息,请参阅[安装 Docker](../getting-started/installation-and-upgrade/installation-requirements/install-docker.md)。
-
-### K3s 要求
-
-对于容器运行时,K3s 默认附带了自己的 containerd。你也可以将 K3s 配置为使用已安装的 Docker 运行时。有关在 Docker 中使用 K3s 的更多信息,请参阅 [K3s 文档](https://docs.k3s.io/advanced#using-docker-as-the-container-runtime)。
-
-Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)。如需指定 K3s 版本,在运行 K3s 安装脚本时,使用 `INSTALL_K3S_VERSION` 环境变量。
-
-如果你使用 **Raspbian Buster** 在 K3s 集群上安装 Rancher,请按照[这些步骤](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster)切换到旧版 iptables。
-
-如果你使用 Alpine Linux 的 K3s 集群上安装 Rancher,请按照[这些步骤](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) 进行其他设置。
-
-### RKE2 要求
-
-对于容器运行时,RKE2 附带了自己的 containerd。RKE2 安装不需要 Docker。
-
-如需了解 RKE2 通过了哪些操作系统版本的测试,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)。
-
-## 硬件要求
-
-本节描述安装 Rancher Server 的节点的 CPU、内存和磁盘要求。
-
-## CPU 和内存
-
-硬件要求根据你的 Rancher 部署规模而定。请根据要求配置每个节点。通过单节点容器安装 Rancher,和在 Kubernetes 集群上安装 Rancher 的要求有所不同。
-
-### RKE 和托管 Kubernetes
-
-这些 CPU 和内存要求适用于每个安装 Rancher Server 的 Kubernetes 集群中的主机。
-
-这些要求适用于 RKE Kubernetes 集群以及托管的 Kubernetes 集群,例如 EKS。
-
-| 部署规模 | 集群 | 节点 | vCPUs | 内存 |
-| --------------- | ---------- | ------------ | -------| ------- |
-| 小 | 最多 150 个 | 最多 1500 个 | 2 | 8 GB |
-| 中 | 最多 300 个 | 最多 3,000 个 | 4 | 16 GB |
-| 大 | 最多 500 个 | 最多 5,000 个 | 8 | 32 GB |
-| 特大 | 最多 1,000 个 | 最多 10,000 个 | 16 | 64 GB |
-| 超大 | 最多 2,000 个 | 最多 20,000 个 | 32 | 128 GB |
-
-每个用例和环境都是不同的。请[联系 Rancher](https://rancher.com/contact/) 来审核你的情况。
-
-### K3s Kubernetes
-
-这些 CPU 和内存要求适用于每个[安装 Rancher Server 的 Kubernetes 集群](install-upgrade-on-a-kubernetes-cluster.md)中的主机。
-
-| 部署规模 | 集群 | 节点 | vCPUs | 内存 | 数据库大小 |
-| --------------- | ---------- | ------------ | -------| ---------| ------------------------- |
-| 小 | 最多 150 个 | 最多 1500 个 | 2 | 8 GB | 2 核,4 GB + 1,000 IOPS |
-| 中 | 最多 300 个 | 最多 3,000 个 | 4 | 16 GB | 2 核,4 GB + 1,000 IOPS |
-| 大 | 最多 500 个 | 最多 5,000 个 | 8 | 32 GB | 2 核,4 GB + 1,000 IOPS |
-| 特大 | 最多 1,000 个 | 最多 10,000 个 | 16 | 64 GB | 2 核,4 GB + 1,000 IOPS |
-| 超大 | 最多 2,000 个 | 最多 20,000 个 | 32 | 128 GB | 2 核,4 GB + 1,000 IOPS |
-
-每个用例和环境都是不同的。请[联系 Rancher](https://rancher.com/contact/) 来审核你的情况。
-
-
-### RKE2 Kubernetes
-
-这些 CPU 和内存要求适用于安装了 RKE2 的每个实例。最低配置要求如下:
-
-| 部署规模 | 集群 | 节点 | vCPUs | 内存 |
-| --------------- | -------- | --------- | ----- | ---- |
-| 小 | 最多 5 个 | 最多 50 个 | 2 | 5 GB |
-| 中 | 最多 15 个 | 最多 200 个 | 3 | 9 GB |
-
-### Docker
-
-这些 CPU 和内存要求适用于[单节点](rancher-on-a-single-node-with-docker.md)安装 Rancher 的主机。
-
-| 部署规模 | 集群 | 节点 | vCPUs | 内存 |
-| --------------- | -------- | --------- | ----- | ---- |
-| 小 | 最多 5 个 | 最多 50 个 | 1 | 4 GB |
-| 中 | 最多 15 个 | 最多 200 个 | 2 | 8 GB |
-
-## Ingress
-
-安装 Rancher 的 Kubernetes 集群中的每个节点都应该运行一个 Ingress。
-
-Ingress 需要部署为 DaemonSet 以确保负载均衡器能成功把流量转发到各个节点。
-
-如果是 RKE,RKE2 和 K3s 安装,你不需要手动安装 Ingress,因为它是默认安装的。
-
-对于托管的 Kubernetes 集群(EKS、GKE、AKS),你需要设置 Ingress。
-
-- **Amazon EKS**:[在 Amazon EKS 上安装 Rancher 以及如何安装 Ingress 以访问 Rancher Server](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md)。
-- **AKS**:[使用 Azure Kubernetes 服务安装 Rancher 以及如何安装 Ingress 以访问 Rancher Server](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)。
-- **GKE**:[使用 GKE 安装 Rancher 以及如何安装 Ingress 以访问 Rancher Server](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md)。
-
-## 磁盘
-
-etcd 在集群中的性能决定了 Rancher 的性能。因此,为了获得最佳速度,我们建议使用 SSD 磁盘来支持 Rancher 管理的 Kubernetes 集群。在云提供商上,你还需使用能获得最大 IOPS 的最小大小。在较大的集群中,请考虑使用专用存储设备存储 etcd 数据和 wal 目录。
-
-## 网络要求
-
-本节描述了安装 Rancher Server 的节点的网络要求。
-
-:::caution
-
-如果包含 Rancher 的服务器带有 `X-Frame-Options=DENY` 标头,在升级旧版 UI 之后,Rancher UI 中的某些页面可能无法渲染。这是因为某些旧版页面在新 UI 中是以 iFrames 模式嵌入的。
-
-:::
-
-### 节点 IP 地址
-
-无论你是在单个节点还是高可用集群上安装 Rancher,每个节点都应配置一个静态 IP。如果使用 DHCP,则每个节点都应该有一个 DHCP 预留,以确保节点分配到相同的 IP 地址。
-
-### 端口要求
-
-为了确保能正常运行,Rancher 需要在 Rancher 节点和下游 Kubernetes 集群节点上开放一些端口。不同集群类型的 Rancher 和下游集群的所有必要端口,请参见[端口要求](../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md)。
-
-## Dockershim 支持
-
-有关 Dockershim 支持的详情,请参见[此页面](../getting-started/installation-and-upgrade/installation-requirements/dockershim.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/istio-setup-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/istio-setup-guide.md
deleted file mode 100644
index 4ed26979729..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/istio-setup-guide.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 设置指南
----
-
-本文介绍如何启用 Istio 并在你的项目中使用它。
-
-如果你使用 Istio 进行流量管理,则需要允许外部流量进入集群。在这种情况下,你将需要执行以下所有步骤。
-
-## 先决条件
-
-本指南假设你已经[安装 Rancher](installation-and-upgrade.md),且已经[配置了一个单独的 Kubernetes 集群](kubernetes-clusters-in-rancher-setup.md)并要在该集群上安装 Istio。
-
-集群中的节点必须满足 [CPU 和内存要求](../integrations-in-rancher/istio/cpu-and-memory-allocations.md)。
-
-Istio 控制的工作负载和服务必须满足 [Istio 要求](https://istio.io/docs/setup/additional-setup/requirements/)。
-
-## 安装
-
-:::tip 快速设置提示:
-
-如果你不需要外部流量到达 Istio,而只想设置 Istio 以监控和跟踪集群内的流量,请跳过[设置 Istio Gateway](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) 和[设置 Istio 的流量管理组件](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md)步骤。
-
-:::
-
-1. [在集群中启用 Istio](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md)
-1. [在要使用 Istio 的所有命名空间中启用 Istio](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md)
-1. [添加注入了 Istio sidecar 的部署和服务](../how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md)
-1. [设置 Istio Gateway](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md)
-1. [设置 Istio 的流量管理组件](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md)
-1. [生成流量并查看 Istio 的运行情况](../how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-cluster-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-cluster-setup.md
deleted file mode 100644
index 67a402ff972..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-cluster-setup.md
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: "Kubernetes 使用教程 "
----
-
-本章节介绍如何安装 Kubernetes 集群,使得 Rancher Server 可以安装在该集群上。
-
-Rancher 可以在任何 Kubernetes 集群上运行。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-components.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-components.md
deleted file mode 100644
index 7deb6057b8a..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-components.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: Kubernetes 组件
----
-
-本文列出的命令和步骤适用于 [Rancher 启动的 Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群上的核心 Kubernetes 组件。
-
-本文包括以下类别的故障排除提示:
-
-- [etcd 节点故障排除](../troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md)
-- [Controlplane 节点故障排除](../troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md)
-- [nginx-proxy 节点故障排除](../troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md)
-- [Worker 节点和通用组件故障排除](../troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md)
-
-## Kubernetes 组件图
-
-
-线条表示组件之间的通信。而颜色纯粹用于视觉辅助。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-resources-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-resources-setup.md
deleted file mode 100644
index 146521b2bc5..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-resources-setup.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: Kubernetes 资源
----
-
-你可以在 Rancher UI 中查看和操作 Kubernetes 集群中的所有自定义资源和 CRD。
-
-## 工作负载
-
-使用[工作负载](workloads-and-pods.md)将应用部署到集群节点,工作负载是包含用于运行应用的 pod 的对象,以及为部署行为设置规则的元数据。工作负载可以部署在集群范围内,也可以部署在一个命名空间内。
-
-部署工作负载时,你可以使用任何镜像进行部署。可供选择的[工作负载类型](workloads-and-pods.md#工作负载类型)有多种,工作负载类型决定了你的应用程序的运行方式。
-
-在工作负载部署之后,你可以继续使用它。你可以:
-
-- 将工作负载[升级](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md)到它运行的应用的更新版本。
-- 如果升级出现问题,将工作负载[回滚](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md)到以前的版本。
-- [添加一个 sidecar](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md),这是一个支持主要工作负载的工作负载。
-
-## 负载均衡和 Ingress
-
-### 负载均衡器
-
-启动应用程序后,它仅在集群中可用。无法从外部访问它。
-
-如果你希望你的应用程序可以从外部访问,则必须向集群添加负载均衡器。如果用户知道负载均衡器的 IP 地址和应用的端口号,负载均衡器可以为外部连接创建一个访问集群的网关。
-
-Rancher 支持两种类型的负载均衡器:
-
-- [Layer-4 负载均衡器](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#四层负载均衡器)
-- [Layer-7 负载均衡器](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#七层负载均衡器)
-
-有关详细信息,请参阅[负载均衡器](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md)。
-
-#### Ingress
-
-负载均衡器只能处理每个 service 的一个 IP 地址。换言之,如果你在集群中运行了多个 service,则必须为每个 service 配备一个负载均衡器。运行多个负载均衡器的花费可能非常高昂。因此,你可以使用 Ingress 来解决此问题。
-
-Ingress 是一组充当负载均衡器的规则。Ingress 与一个或多个 Ingress Controller 一起动态路由 service 的请求。Ingress 收到请求时,集群中的 Ingress Controller 会对负载均衡器进行配置,从而根据你配置的 service 子域或路径规则将请求定向到正确的 service。
-
-有关详细信息,请参阅 [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md)。
-
-在项目中使用 Ingress 时,你可以通过设置全局 DNS 条目来将 Ingress 主机名编程到外部 DNS。
-
-## 服务发现
-
-使用负载均衡器和/或 Ingress 将集群公开给外部请求后,你只能通过 IP 地址访问集群。要创建可解析的主机名,你必须创建服务记录,该记录将 IP 地址、外部主机名、DNS 记录别名、工作负载或标记的 pod 映射到特定主机名。
-
-有关详细信息,请参阅[服务发现](../how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md)。
-
-## 应用程序
-
-除了启动应用程序的各个组件外,你还可以使用 Rancher 应用商店来启动应用,即 Helm Chart。
-
-## Kubernetes 资源
-
-在 Rancher 项目或命名空间的上下文中,_资源_ 是支持 Pod 操作的文件和数据。在 Rancher 中,证书、镜像仓库和密文都被视为资源。但是,Kubernetes 将资源划分为不同类型的[密文(secret)](https://kubernetes.io/docs/concepts/configuration/secret/)。因此,在单个项目或命名空间中,各个资源必须具有唯一的名称以避免冲突。资源主要用于承载敏感信息,但也有其他用途。
-
-资源包括:
-
-- [证书](../how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md):用于加密/解密进入或离开集群的数据的文件。
-- [ConfigMap](../how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md):存储一般配置信息的文件,例如一组配置文件。
-- [密文](../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md):存储密码、token 或密钥等敏感数据的文件。
-- [镜像仓库](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md):携带用于验证私有镜像仓库的凭证的文件。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/machine-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/machine-configuration.md
deleted file mode 100644
index f7878f968b7..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/machine-configuration.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: 主机配置
----
-
-主机配置指的是如何将资源分配给虚拟机。请参阅 [Amazon EC2](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md)、[DigitalOcean](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md) 和 [Azure](../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md) 的文档以了解更多信息。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-projects.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-projects.md
deleted file mode 100644
index 341878779ef..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-projects.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: 项目管理
----
-
-_项目_ 是 Rancher 中引入的对象,可帮助你更有组织地管理 Kubernetes 集群中的命名空间。你可以使用项目创建多租户集群,这种集群允许一组用户共享相同的底层资源来创建应用,而应用之间不会相互影响。
-
-在层次结构方面:
-
-- 集群包含项目
-- 项目包含命名空间
-
-在 Rancher 中,你可以使用项目将多个命名空间作为一个实体进行管理。在原生 Kubernetes(没有项目这个概念)中,RBAC 或集群资源等功能被分配给了各个命名空间。如果集群中的多个命名空间需要分配同样的访问权限,分配权限会变得非常繁琐。即使所有命名空间都需要相同的权限,但也无法使用一个操作中将这些权限应用于所有命名空间。你必须重复地将这些权限分配给每个命名空间。
-
-而 Rancher 通过引入项目的概念,通过允许你在项目级别应用资源和访问权限。然后,项目中的每个命名空间都会继承这些资源和策略。因此你只需将资源和策略分配给项目即可,不需要将它们分配给每个单独的命名空间。
-
-你可以使用项目执行以下操作:
-
-- [为用户分配一组命名空间的访问权限](../how-to-guides/new-user-guides/add-users-to-projects.md)
-- 为用户分配[项目中的特定角色](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。角色可以是所有者、成员、只读或[自定义](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md)
-- [设置资源配额](manage-project-resource-quotas.md)
-- [管理命名空间](../how-to-guides/new-user-guides/manage-namespaces.md)
-- [配置工具](../reference-guides/rancher-project-tools.md)
-- [配置 Pod 安全策略](../how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md)
-
-### 授权
-
-非管理者用户只有在[管理员](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者或成员](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)将非管理员用户添加到项目的**成员**选项卡后,才能获取项目的访问权限。
-
-创建项目的人自动成为[项目所有者](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。
-
-## 在项目之间切换
-
-要在项目之间切换,请使用导航栏中的下拉菜单。你也可以直接在导航栏中切换项目:
-
-1. 在左上角,单击 **☰ > 集群管理**。
-1. 在**集群**页面,进入要切换项目的集群然后点击 **Explore**。
-1. 在顶部导航栏中,选择要打开的项目。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-role-based-access-control-rbac.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-role-based-access-control-rbac.md
deleted file mode 100644
index a4cf2425ecb..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-role-based-access-control-rbac.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: RBAC
----
-
-Rancher 通过 _用户_ 进行授权管理。如[身份验证](authentication-config.md)中所述,用户可以是本地用户,也可以是外部用户。
-
-配置外部身份验证后,**用户**页面上显示的用户会发生变化。
-
-- 如果你以本地用户身份登录,则仅显示本地用户。
-
-- 如果你以外部用户身份登录,则会同时显示外部用户和本地用户。
-
-## 用户和角色
-
-一旦用户登录到 Rancher,他们的 _授权_,也就是他们在系统中的访问权限,将由 _全局权限_ 和 _集群和项目角色_ 决定。
-
-- [全局权限](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md):
-
- 定义用户在任何特定集群之外的授权。
-
-- [集群和项目角色](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md):
-
- 定义用户在分配了角色的特定集群或项目中的授权。
-
-全局权限以及集群和项目角色都是基于 [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) 实现的。因此,权限和角色的底层实现是由 Kubernetes 完成的。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-alerting-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-alerting-guides.md
deleted file mode 100644
index 7769d685ae4..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-alerting-guides.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Monitoring 指南
----
-
-- [启用 monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md)
-- [卸载 monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md)
-- [Monitoring 工作负载](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md)
-- [自定义 Grafana 仪表板](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md)
-- [持久化 Grafana 仪表板](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md)
-- [调试高内存使用率](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-v2-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-v2-configuration.md
deleted file mode 100644
index 6cd47ac5d10..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-v2-configuration.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Monitoring V2 配置
----
-
-本文介绍了在 Rancher 中配置 Monitoring V2 的必要选项:
-
-- [接收器配置](../reference-guides/monitoring-v2-configuration/receivers.md)
-- [路由配置](../reference-guides/monitoring-v2-configuration/routes.md)
-- [ServiceMonitor 和 PodMonitor 配置](../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md)
-- [Helm Chart 选项](../reference-guides/monitoring-v2-configuration/helm-chart-options.md)
-- [示例](../reference-guides/monitoring-v2-configuration/examples.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/new-user-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/new-user-guides.md
deleted file mode 100644
index 7e46d5b69a8..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/new-user-guides.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: 新用户指南
----
-
-新用户指南(也称为**教程**)描述了某些操作的实际步骤。这些文档是“学习导向”的,也就是说用户通过“操作”来学习。
-
-新用户指南旨在引导初学者或 Rancher 的日常用户通过一系列步骤来学习如何进行某些操作。这些文档旨在帮助用户通过使用易于遵循、有意义且可重复的操作来了解如何完成任务。这些指南将帮助用户完成工作,并能立刻看到效果。
-
-正常来说,普通 Rancher 用户的技术水平高于“初学者”,但是,我们的新用户指南为初学者以及经验丰富的 Rancher 用户提供相同的指导。我们结合使用了高级语言和技术语言来介绍各个主题,并指导用户完成 Rancher 用户需要了解的通用任务。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/node-template-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/node-template-configuration.md
deleted file mode 100644
index 489dbbafa13..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/node-template-configuration.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: 节点模板配置
----
-
-要了解节点模板配置,请参阅[EC2 节点模板配置](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md)、[DigitalOcean 节点模板配置](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md)、[Azure 节点模板配置](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md)、[vSphere 节点模板配置](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md)和 [Nutanix 节点模板配置](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/other-installation-methods.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/other-installation-methods.md
deleted file mode 100644
index f56c0cbb179..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/other-installation-methods.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: 其他安装方式
----
-
-### 离线安装
-
-按照[以下步骤](air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
-
-离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-
-### Docker 安装
-
-[单节点 Docker 安装](rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
-
-Docker 安装仅用于开发和测试环境。
-
-由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
-
-Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/prometheus-federator-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/prometheus-federator-guides.md
deleted file mode 100644
index 742fd54b6fe..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/prometheus-federator-guides.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Prometheus Federator 指南
----
-
-- [启用 Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md)
-- [卸载 Prometheus Operator](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md)
-- [自定义 Grafana 仪表板](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md)
-- [设置工作负载](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-behind-an-http-proxy.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-behind-an-http-proxy.md
deleted file mode 100644
index e468db53dd5..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-behind-an-http-proxy.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 使用 HTTP 代理安装 Rancher
----
-
-很多企业本地运行的服务器或虚拟机不能直接访问互联网,但是出于安全考虑,他们必须通过 HTTP(S) 代理连接到外部服务。本教程将分步介绍如何在这样的环境中进行高可用的 Rancher 安装。
-
-另外,用户也可以在没有任何互联网访问的情况下离线设置 Rancher。详情请参见 [Rancher 官方文档](air-gapped-helm-cli-install.md)。
-
-## 安装概要
-
-1. [配置基础设施](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md)
-2. [配置 Kubernetes 集群](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md)
-3. [安装 Rancher](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-managed-clusters.md
deleted file mode 100644
index 15d2194487a..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-managed-clusters.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Rancher 管理集群的最佳实践
----
-
-### Logging
-
-有关集群级别日志和应用日志的建议,请参见 [Logging 最佳实践](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md)。
-
-### Monitoring
-
-配置合理的监控和告警规则对于安全、可靠地运行生产环境中的工作负载至关重要。有关更多建议,请参阅[最佳实践](../reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md)。
-
-### 设置容器的技巧
-
-配置良好的容器可以极大地提高环境的整体性能和安全性。有关容器设置的建议,请参见[设置容器的技巧](../reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md)。
-
-### Rancher 管理 vSphere 集群的最佳实践
-
-[Rancher 管理 vSphere 集群的最佳实践](../reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md)概述了在 vSphere 环境中配置下游 Rancher 集群的参考架构,以及 VMware 记录的标准 vSphere 最佳实践。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-manager-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-manager-architecture.md
deleted file mode 100644
index ad6a3ea462f..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-manager-architecture.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: 架构
----
-
-本章节重点介绍 [Rancher Server 及其组件](../reference-guides/rancher-manager-architecture/rancher-server-and-components.md) 以及 [Rancher 如何与下游 Kubernetes 集群通信](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md)。
-
-有关安装 Rancher 的不同方式的信息,请参见[安装选项概述](installation-and-upgrade.md#安装方式概述)。
-
-有关 Rancher API Server 的主要功能,请参见[概述](../getting-started/overview.md#rancher-api-server-的功能)。
-
-有关如何为 Rancher Server 设置底层基础架构,请参见[架构推荐](../reference-guides/rancher-manager-architecture/architecture-recommendations.md)。
-
-:::note
-
-本节默认你已对 Docker 和 Kubernetes 有一定的了解。如果你需要了解 Kubernetes 组件如何协作,请参见 [Kubernetes 概念](../reference-guides/kubernetes-concepts.md)。
-
-:::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-server-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-server-configuration.md
deleted file mode 100644
index a0b6e3850b1..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-server-configuration.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Rancher Server 配置
----
-
-- [RKE1 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)
-- [RKE2 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)
-- [K3s 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md)
-- [EKS 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md)
-- [AKS 集群配置](../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md)
-- [GKE 集群配置](../pages-for-subheaders/gke-cluster-configuration.md)
-- [使用现有节点](../pages-for-subheaders/use-existing-nodes.md)
-- [同步集群](../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-server.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-server.md
deleted file mode 100644
index 0d632b6def6..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-server.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: Rancher Server 的最佳实践
----
-
-本指南介绍了让 Rancher 管理下游 Kubernetes 集群的 Rancher Server 运行建议。
-
-### 推荐的架构和基础设施
-
-有关在高可用 Kubernetes 集群上设置 Rancher Server 的通用建议,请参见[本指南](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md)。
-
-### 部署策略
-
-[本指南](../reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md)旨在帮助你选择部署策略(区域部署/中心辐射型部署),来让 Rancher Server 更好地管理下游 Kubernetes 集群。
-
-### 在 vSphere 环境中安装 Rancher
-
-[本指南](../reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md)介绍了在 vSphere 环境中安装 Rancher 的参考架构,以及 VMware 记录的标准 vSphere 最佳实践。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/resources.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/resources.md
deleted file mode 100644
index 45aa544fa7e..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/resources.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: 资源
----
-
-### Docker 安装
-
-[单节点 Docker 安装](rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
-
-由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
-
-### 离线安装
-
-按照[以下步骤](air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
-
-离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
-
-### 高级选项
-
-安装 Rancher 时,有如下几个可开启的高级选项:每个安装指南中都提供了对应的选项。了解选项详情:
-
-- [自定义 CA 证书](../getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md)
-- [API 审计日志](../how-to-guides/advanced-user-guides/enable-api-audit-log.md)
-- [TLS 设置](../getting-started/installation-and-upgrade/installation-references/tls-settings.md)
-- [etcd 配置](../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)
-- [离线安装 Local System Chart](../getting-started/installation-and-upgrade/resources/local-system-charts.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/single-node-rancher-in-docker.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/single-node-rancher-in-docker.md
deleted file mode 100644
index ec2524a165f..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/single-node-rancher-in-docker.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: Docker 中的单节点 Rancher
----
-
-以下文档将讨论 Docker 安装的 [HTTP 代理配置](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md)和[高级选项](../reference-guides/single-node-rancher-in-docker/advanced-options.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/user-settings.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/user-settings.md
deleted file mode 100644
index 5ed627b7b07..00000000000
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/user-settings.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: 用户设置
----
-
-在 Rancher 中,每个用户都有很多与登录相关的设置,例如个人偏好、API 密钥等。你可以从**用户设置**菜单中配置这些设置。你可以单击主菜单中的头像来打开此菜单。
-
-
-
-可用的用户设置包括:
-
-- [API & 密钥](../reference-guides/user-settings/api-keys.md):如果你想以编程方式与 Rancher 交互,你需要一个 API 密钥。你可以按照本节中的说明获取密钥。
-- [云凭证](../reference-guides/user-settings/manage-cloud-credentials.md):管理[节点模板](use-new-nodes-in-an-infra-provider.md#节点模板)使用的云凭证,从而[为集群配置节点](../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
-- [节点模板](../reference-guides/user-settings/manage-node-templates.md):管理 [Rancher 用来为集群配置节点](../pages-for-subheaders/launch-kubernetes-with-rancher.md)的模板。
-- [偏好设置](../reference-guides/user-settings/user-preferences.md):设置 Rancher UI 的表面首选项。
-- 登出:结束你的用户会话。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md
index c48a1d04995..f7c470b45c9 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md
@@ -4,7 +4,7 @@ title: 监控最佳实践
配置合理的监控和告警规则对于安全、可靠地运行生产环境中的工作负载至关重要。在使用 Kubernetes 和 Rancher 时也是如此。幸运的是,你可以使用集成的监控和告警功能来简化整个过程。
-[Rancher 监控文档](../../../pages-for-subheaders/monitoring-and-alerting.md)描述了如何设置完整的 Prometheus 和 Grafana。这是开箱即用的功能,它将从集群中的所有系统和 Kubernetes 组件中抓取监控数据,并提供合理的仪表板和告警。但为了实现可靠的设置,你还需要监控你的工作负载并使 Prometheus 和 Grafana 适应你的特定用例和集群规模。本文档将为你提供这方面的最佳实践。
+[Rancher 监控文档](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)描述了如何设置完整的 Prometheus 和 Grafana。这是开箱即用的功能,它将从集群中的所有系统和 Kubernetes 组件中抓取监控数据,并提供合理的仪表板和告警。但为了实现可靠的设置,你还需要监控你的工作负载并使 Prometheus 和 Grafana 适应你的特定用例和集群规模。本文档将为你提供这方面的最佳实践。
## 监控内容
@@ -86,7 +86,7 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。
如果你有一个(微)服务架构,在该架构中集群的多个单独的工作负载相互通信,那么拥有这些流量的详细指标和跟踪是非常重要的,因为这可以帮助你了解所有这些工作负载之间的通信方式,以及问题或瓶颈可能出现的地方。
-当然,你可以监控所有工作负载中的所有内部流量,并将这些指标暴露给 Prometheus,但这相当耗费精力。像 Istio 这样的服务网格(可以通过[单击](../../../pages-for-subheaders/istio.md)在 Rancher 中安装)可以自动完成这项工作,并提供所有 Service 之间流量的丰富的遥测数据。
+当然,你可以监控所有工作负载中的所有内部流量,并将这些指标暴露给 Prometheus,但这相当耗费精力。像 Istio 这样的服务网格(可以通过[单击](../../../integrations-in-rancher/istio/istio.md)在 Rancher 中安装)可以自动完成这项工作,并提供所有 Service 之间流量的丰富的遥测数据。
## 真实用户监控
@@ -94,7 +94,7 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。
## 安全监控
-除了通过监控工作负载来检测性能、可用性或可扩展性之外,你还应该监控集群和运行在集群中的工作负载,来发现潜在的安全问题。一个好的做法是经常运行 [CIS 扫描](../../../pages-for-subheaders/cis-scan-guides.md)并发出告警,来检查集群是否按照安全最佳实践进行配置。
+除了通过监控工作负载来检测性能、可用性或可扩展性之外,你还应该监控集群和运行在集群中的工作负载,来发现潜在的安全问题。一个好的做法是经常运行 [CIS 扫描](../../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)并发出告警,来检查集群是否按照安全最佳实践进行配置。
对于工作负载,你可以查看 Kubernetes 和 Container 安全解决方案,例如 [NeuVector](https://www.suse.com/products/neuvector/)、[Falco](https://falco.org/)、[Aqua Kubernetes Security](https://www.aquasec.com/solutions/kubernetes-container-security/) 和 [SysDig](https://sysdig.com/)。
@@ -108,4 +108,4 @@ Prometheus 不是用于长期存储指标的,它只用于短期存储。
如果告警开始发送,但你暂时无法处理,你也可以将告警静默一定时间,以便以后查看。
-如果需要了解更多关于如何设置告警和通知通道的信息,请访问 [Rancher 文档中心](../../../pages-for-subheaders/monitoring-and-alerting.md)。
+如果需要了解更多关于如何设置告警和通知通道的信息,请访问 [Rancher 文档中心](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md
index e12f1db2498..9cd0bd1ca15 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters.md
@@ -6,18 +6,18 @@ title: Rancher 管理集群的最佳实践
-### Logging
+## Logging
有关集群级别日志和应用日志的建议,请参见 [Logging 最佳实践](logging-best-practices.md)。
-### Monitoring
+## Monitoring
配置合理的监控和告警规则对于安全、可靠地运行生产环境中的工作负载至关重要。有关更多建议,请参阅[最佳实践](monitoring-best-practices.md)。
-### 设置容器的技巧
+## 设置容器的技巧
配置良好的容器可以极大地提高环境的整体性能和安全性。有关容器设置的建议,请参见[设置容器的技巧](tips-to-set-up-containers.md)。
-### Rancher 管理 vSphere 集群的最佳实践
+## Rancher 管理 vSphere 集群的最佳实践
[Rancher 管理 vSphere 集群的最佳实践](rancher-managed-clusters-in-vsphere.md)概述了在 vSphere 环境中配置下游 Rancher 集群的参考架构,以及 VMware 记录的标准 vSphere 最佳实践。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md
index d48c0cf2af2..95431ee3e11 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md
@@ -8,7 +8,7 @@ title: 设置容器的技巧
如果你需要了解容器安全的详细信息,也可以参见 Rancher 的[容器安全指南](https://rancher.com/complete-guide-container-security)。
-### 使用通用容器操作系统
+## 使用通用容器操作系统
在可能的情况下,你应该尽量在通用的容器基础操作系统上进行标准化。
@@ -16,17 +16,17 @@ Alpine 和 BusyBox 等较小的发行版减少了容器镜像的大小,并且
流行的发行版如 Ubuntu、Fedora 和 CentOS 等都经过了大量的测试,并提供了更多的功能。
-### 使用 From scratch 容器
+## 使用 From scratch 容器
如果你的微服务是一个独立的静态二进制,你应该使用 `From scratch` 容器。
`FROM scratch` 容器是一个[官方 Docker 镜像](https://hub.docker.com/_/scratch),它是空的,这样你就可以用它来设计最小的镜像。
这个镜像这将具有最小的攻击层和最小的镜像大小。
-### 以非特权方式运行容器进程
+## 以非特权方式运行容器进程
在可能的情况下,在容器内运行进程时使用非特权用户。虽然容器运行时提供了隔离,但仍然可能存在漏洞和攻击。如果容器以 root 身份运行,无意中或意外的主机挂载也会受到影响。有关为 Pod 或容器配置安全上下文的详细信息,请参见 [Kubernetes 文档](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)。
-### 定义资源限制
+## 定义资源限制
你应该将 CPU 和内存限制应用到你的 Pod 上。这可以帮助管理 worker 节点上的资源,并避免发生故障的微服务影响其他微服务。
在标准 Kubernetes 中,你可以设置命名空间级别的资源限制。在 Rancher 中,你可以设置项目级别的资源限制,项目内的所有命名空间都会继承这些限制。详情请参见 Rancher 官方文档。
@@ -35,7 +35,7 @@ Alpine 和 BusyBox 等较小的发行版减少了容器镜像的大小,并且
有关如何在[容器级别](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container)和命名空间级别设置资源限制的更多信息,请参见 Kubernetes 文档。
-### 定义资源需求
+## 定义资源需求
你应该将 CPU 和内存要求应用到你的 Pod 上。这对于通知调度器需要将你的 pod 放置在哪种类型的计算节点上,并确保它不会过度配置该节点资源至关重要。在 Kubernetes 中,你可以通过在 pod 的容器规范的资源请求字段中定义 `resources.requests` 来设置资源需求。详情请参见 [Kubernetes 文档](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container)。
:::note
@@ -46,7 +46,7 @@ Alpine 和 BusyBox 等较小的发行版减少了容器镜像的大小,并且
建议在容器级别上定义资源需求,否则,调度器会认为集群加载对你的应用没有帮助。
-### 配置存活和就绪探测器
+## 配置存活和就绪探测器
你可以为你的容器配置存活探测器和就绪探测器。如果你的容器不是完全崩溃,Kubernetes 是不会知道它是不健康的,除非你创建一个可以报告容器状态的端点或机制。或者,确保你的容器在不健康的情况下停止并崩溃。
Kubernetes 文档展示了如何[为容器配置存活和就绪探测器](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md
index 8ffe650d009..efdfde73d91 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md
@@ -39,7 +39,7 @@ title: 在 vSphere 环境中安装 Rancher
### 根据 Rancher 文档确定虚拟机的大小
-请参阅[安装要求](../../../pages-for-subheaders/installation-requirements.md)。
+请参阅[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。
### 利用虚拟机模板来构建环境
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/rancher-server.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/rancher-server.md
index d297b395a0a..00202488669 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/rancher-server.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/rancher-server.md
@@ -8,14 +8,14 @@ title: Rancher Server 的最佳实践
本指南介绍了让 Rancher 管理下游 Kubernetes 集群的 Rancher Server 运行建议。
-### 推荐的架构和基础设施
+## 推荐的架构和基础设施
有关在高可用 Kubernetes 集群上设置 Rancher Server 的通用建议,请参见[本指南](tips-for-running-rancher.md)。
-### 部署策略
+## 部署策略
[本指南](rancher-deployment-strategy.md)旨在帮助你选择部署策略(区域部署/中心辐射型部署),来让 Rancher Server 更好地管理下游 Kubernetes 集群。
-### 在 vSphere 环境中安装 Rancher
+## 在 vSphere 环境中安装 Rancher
[本指南](on-premises-rancher-in-vsphere.md)介绍了在 vSphere 环境中安装 Rancher 的参考架构,以及 VMware 记录的标准 vSphere 最佳实践。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
index 0c47b75fa06..338102c3fa5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md
@@ -10,27 +10,31 @@ title: Rancher 运行技巧
在设置高可用 Rancher 安装时,请考虑以下事项。
-### 在单独的集群上运行 Rancher
+## 在单独的集群上运行 Rancher
+
不要在安装了 Rancher 的 Kubernetes 集群上运行其他工作负载或微服务。
-### 确保 Kubernetes 节点配置正确
-在部署节点时,请遵循 K8s 和 etcd 的最佳实践,其中包括禁用 swap,检查集群中的所有主机之间是否有良好的网络连接,为每个节点使用唯一的主机名、MAC 地址和 `product_uuids`,检查所需端口是否已经打开,并使用配置 SSD 的 etcd 进行部署。详情请参见 [kubernetes 官方文档](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin)和 [etcd 性能操作指南](https://etcd.io/docs/v3.4/op-guide/performance/)。
+## 确保 Kubernetes 节点配置正确
+
+在部署节点时,请遵循 K8s 和 etcd 的最佳实践,其中包括禁用 swap,检查集群中的所有主机之间是否有良好的网络连接,为每个节点使用唯一的主机名、MAC 地址和 `product_uuids`,检查所需端口是否已经打开,并使用配置 SSD 的 etcd 进行部署。详情请参见 [kubernetes 官方文档](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin)和 [etcd 性能操作指南](https://etcd.io/docs/v3.5/op-guide/performance/)。
+
+## 使用 RKE 时:备份状态文件(Statefile)
-### 使用 RKE 时:备份状态文件(Statefile)
RKE 将集群状态记录在一个名为 `cluster.rkestate` 的文件中,该文件对集群的恢复和/或通过 RKE 维护集群非常重要。由于这个文件包含证书材料,我们强烈建议在备份前对该文件进行加密。请在每次运行 `rke up` 后备份状态文件。
-### 在同一个数据中心运行集群中的所有节点
+## 在同一个数据中心运行集群中的所有节点
+
为达到最佳性能,请在同一地理数据中心运行所有三个节点。如果你在云(如 AWS)上运行节点,请在不同的可用区(AZ)中运行这三个节点。例如,在 us-west-2a 中运行节点 1,在 us-west-2b 中运行节点 2,在 us-west-2c 中运行节点 3。
-### 保证开发和生产环境的相似性
+## 保证开发和生产环境的相似性
+
强烈建议为运行 Rancher 的 Kubernetes 集群配备 “staging” 或 “pre-production” 环境。这个环境的软件和硬件配置应该尽可能接近你的生产环境。
-### 监控集群以规划容量
-Rancher Server 的 Kubernetes 集群应该尽可能满足[系统和硬件要求](../../../pages-for-subheaders/installation-requirements.md)。越偏离系统和硬件要求,你可能面临的风险就越大。
+## 监控集群以规划容量
+Rancher Server 的 Kubernetes 集群应该尽可能满足[系统和硬件要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md)。越偏离系统和硬件要求,你可能面临的风险就越大。
但是,已发布的要求已经考虑了各种工作负载类型,因此,基于指标来规划容量应该是扩展 Rancher 的最佳实践。
你可以将 Rancher 集成业界领先的开源监控解决方案 Prometheus 以及能可视化 Prometheus 指标的 Grafana,来监控集群节点、Kubernetes 组件和软件部署的状态和过程。
-在集群中[启用监控](../../../pages-for-subheaders/monitoring-and-alerting.md)后,你可以通过设置告警通知,来了解集群容量的使用情况。你还可以使用 Prometheus 和 Grafana 监控框架,在你扩容时建立关键指标的基线。
-
+在集群中[启用监控](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)后,你可以通过设置告警通知,来了解集群容量的使用情况。你还可以使用 Prometheus 和 Grafana 监控框架,在你扩容时建立关键指标的基线。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
index 8d5fc725d36..8e98f63729f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tips-for-scaling-rancher.md
@@ -56,6 +56,6 @@ Rancher 的大部分逻辑都发生在事件处理程序上。每当更新对象
与 Rancher 版本类似,我们建议让你的 kubernetes 集群保持使用最新版本。这将确保你的集群能包含可用的性能增强或错误修复。
### 优化 ETCD
-[ETCD 性能](https://etcd.io/docs/v3.4/op-guide/performance/)的两个主要瓶颈是磁盘速度和网络速度。对任何一个进行优化都应该能提高性能。有关 ETCD 性能的信息,请参阅 [etcd 性能慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装调优 etcd](https://docs.ranchermanager.rancher.io/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs)。有关磁盘的信息,你也可以参阅[我们的文档](https://docs.Ranchermanager.Rancher.io/v2.5/pages-for-subheaders/installation-requirements#disks)。
+[ETCD 性能](https://etcd.io/docs/v3.5/op-guide/performance/)的两个主要瓶颈是磁盘速度和网络速度。对任何一个进行优化都应该能提高性能。有关 ETCD 性能的信息,请参阅 [etcd 性能慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装调优 etcd](https://docs.ranchermanager.rancher.io/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs)。有关磁盘的信息,你也可以参阅[我们的文档](https://docs.Ranchermanager.Rancher.io/v2.5/pages-for-subheaders/installation-requirements#disks)。
理论上,ETCD 集群中的节点越多,由于复制要求 [source](https://etcd.io/docs/v3.3/faq),它就会越慢。这可能与常见的缩放方法相悖。我们还可以推断,ETCD 的性能将受到节点间距离的反面影响,因为这将减慢网络通信。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
index 552e79ec76b..154b01c5ad4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md
@@ -110,7 +110,7 @@ Rancher 的大部分逻辑发生在 Event Handler 上。每当资源对象产生
Etcd 是 Kubernetes 和 Rancher 的后端数据库,在 Rancher 性能中扮演重要的角色。
-[Etcd 性能](https://etcd.io/docs/v3.4/op-guide/performance/)的两个主要瓶颈是磁盘和网络速度。Etcd 应当在具有高速网络和高读写速度 (IOPS) SSD 硬盘的专用节点上运行。有关 etcd 性能的更多信息,请参阅 [etcd 性能缓慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装进行 etcd 调优](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)。有关磁盘的信息可以在[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#磁盘)中找到。
+[Etcd 性能](https://etcd.io/docs/v3.5/op-guide/performance/)的两个主要瓶颈是磁盘和网络速度。Etcd 应当在具有高速网络和高读写速度 (IOPS) SSD 硬盘的专用节点上运行。有关 etcd 性能的更多信息,请参阅 [etcd 性能缓慢(性能测试和优化)](https://www.suse.com/support/kb/doc/?id=000020100)和[为大型安装进行 etcd 调优](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)。有关磁盘的信息可以在[安装要求](../../../getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md#磁盘)中找到。
根据 etcd 的[复制机制](https://etcd.io/docs/v3.5/faq/#what-is-maximum-cluster-size),建议在三个节点上运行 etcd,运行在更多的节点上反而会降低速度。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/kubectl-utility.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/kubectl-utility.md
index 62377d846c6..28315bf813d 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/kubectl-utility.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/kubectl-utility.md
@@ -18,7 +18,7 @@ kubectl 用于与 Rancher 进行交互。
_要求_
-如果管理员[关闭了 kubeconfig 令牌生成](../about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),当你运行 `kubectl` 时,kubeconfig 文件需要 [Rancher CLI](./rancher-cli.md) 存在于你的 PATH 中。否则,你会看到这样的错误信息:
+如果管理员[关闭了 kubeconfig 令牌生成](../../api/api-tokens.md#在生成的-kubeconfig-中禁用令牌),当你运行 `kubectl` 时,kubeconfig 文件需要 [Rancher CLI](./rancher-cli.md) 存在于你的 PATH 中。否则,你会看到这样的错误信息:
`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`。
该功能可以让 kubectl 与 Rancher Server 进行身份验证,并在需要时获得新的 kubeconfig token。目前支持以下验证提供程序:
@@ -29,4 +29,4 @@ _要求_
4. OpenLDAP
5. SAML 身份提供商:Ping,Okta,ADFS,Keycloak 和 Shibboleth
-如果你是第一次运行 kubectl(例如,`kubectl get pods`),它会要求你选择一个验证提供程序并使用 Rancher Server 登录。kubeconfig token 会被缓存到 `./.cache/token` 下你运行 kubectl 的路径中。该 Token 在[过期](../about-the-api/api-tokens.md#在生成的-kubeconfig-中禁用令牌)或[从 Rancher Server 删除](../about-the-api/api-tokens.md#删除令牌)之前都是有效的。过期后,下一个 `kubectl get pods` 命令会要求你再次使用 Rancher Server 登录。
\ No newline at end of file
+如果你是第一次运行 kubectl(例如,`kubectl get pods`),它会要求你选择一个验证提供程序并使用 Rancher Server 登录。kubeconfig token 会被缓存到 `./.cache/token` 下你运行 kubectl 的路径中。该 Token 在[过期](../../api/api-tokens.md#在生成的-kubeconfig-中禁用令牌)或[从 Rancher Server 删除](../../api/api-tokens.md#删除令牌)之前都是有效的。过期后,下一个 `kubectl get pods` 命令会要求你再次使用 Rancher Server 登录。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md
index 4cf44238004..b8accc82b57 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cli-with-rancher/rancher-cli.md
@@ -5,7 +5,7 @@ description: Rancher CLI 是一个命令行工具,用于在工作站中与 Ran
Rancher CLI(命令行界面)是一个命令行工具,可用于与 Rancher 进行交互。使用此工具,你可以使用命令行而不用通过 GUI 来操作 Rancher。
-### 下载 Rancher CLI
+## 下载 Rancher CLI
你可以直接 UI 下载二进制文件。
@@ -13,14 +13,14 @@ Rancher CLI(命令行界面)是一个命令行工具,可用于与 Rancher
1. 在导航侧边栏菜单底部,单击**简介**。
1. 在 **CLI 下载**中,有 Windows、Mac 和 Linux 的二进制文件下载链接。你还可以访问我们的 CLI [发布页面](https://github.com/rancher/cli/releases)直接下载二进制文件。
-### 要求
+## 要求
下载 Rancher CLI 后,你需要进行一些配置。Rancher CLI 需要:
- 你的 Rancher Server URL,用于连接到 Rancher Server。
- API 持有者令牌(Bearer Token),用于向 Rancher 进行身份验证。有关获取持有者令牌的更多信息,请参阅[创建 API 密钥](../user-settings/api-keys.md)。
-### CLI 身份验证
+## CLI 身份验证
在使用 Rancher CLI 控制你的 Rancher Server 之前,你必须使用 API 持有者令牌进行身份验证。运行以下命令进行登录(将 `` 和 `` 替换为你的实际信息):
@@ -30,7 +30,7 @@ $ ./rancher login https:// --token
如果 Rancher Server 使用自签名证书,Rancher CLI 会提示你继续连接。
-### 项目选择
+## 项目选择
在执行命令之前,你必须先选择一个 Rancher 项目来执行这些命令。要选择[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md),请运行 `./rancher context switch` 命令。输入此命令后,会显示可用项目的列表。输入一个数字以选择项目。
@@ -54,34 +54,34 @@ INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json
请确保你可以成功运行 `rancher kubectl get pods`。
-### 命令
+## 命令
以下命令可用于 Rancher CLI:
| 命令 | 结果 |
|---|---|
| `apps, [app]` | 对商店应用(即单个 [Helm Chart](https://docs.helm.sh/developing_charts/))或 Rancher Chart 执行操作。 |
-| `catalog` | 对[应用商店](../../pages-for-subheaders/helm-charts-in-rancher.md)执行操作。 |
-| `clusters, [cluster]` | 对[集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)执行操作。 |
+| `catalog` | 对[应用商店](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md)执行操作。 |
+| `clusters, [cluster]` | 对[集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)执行操作。 |
| `context` | 在 Rancher [项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)之间切换。有关示例,请参阅[项目选择](#项目选择)。 |
-| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | 显示 [Kubernetes 资源](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types)或 Rancher 资源(即[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)和[工作负载](../../pages-for-subheaders/workloads-and-pods.md))的详细信息。按名称或 ID 指定资源。 |
+| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | 显示 [Kubernetes 资源](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types)或 Rancher 资源(即[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)和[工作负载](../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md))的详细信息。按名称或 ID 指定资源。 |
| `kubectl` | 运行 [kubectl 命令](https://kubernetes.io/docs/reference/kubectl/overview/#operations)。 |
| `login, [l]` | 登录 Rancher Server。有关示例,请参阅 [CLI 身份验证](#cli-身份验证)。 |
| `namespaces, [namespace]` | 执行命名空间操作。 |
| `nodes, [node]` | 执行节点空间操作。 |
| `projects, [project]` | 执行[项目](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)操作。 |
-| `ps` | 显示项目中的[工作负载](../../pages-for-subheaders/workloads-and-pods.md)。 |
+| `ps` | 显示项目中的[工作负载](../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)。 |
| `settings, [setting]` | 显示 Rancher Server 的当前设置。 |
| `ssh` | 使用 SSH 协议连接到你的某个集群节点。 |
| `help, [h]` | 显示命令列表或某个命令的帮助。 |
-### Rancher CLI 帮助
+## Rancher CLI 帮助
使用 CLI 登录 Rancher Server 后,输入 `./rancher --help` 以获取命令列表。
所有命令都支持 `--help` 标志,该标志解释了每个命令的用法。
-### 限制
+## 限制
-Rancher CLI **不能**用于安装[仪表板应用程序或 Rancher 功能 Chart](../../pages-for-subheaders/helm-charts-in-rancher.md)。
+Rancher CLI **不能**用于安装[仪表板应用程序或 Rancher 功能 Chart](../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md
index 9bc1db8d00b..ada0319a7da 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/cluster-configuration.md
@@ -10,18 +10,18 @@ title: 集群配置
有关编辑集群成员资格的信息,请转至[此页面](../../how-to-guides/new-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md)。
-### 集群配置参考
+## 集群配置参考
集群配置选项取决于 Kubernetes 集群的类型:
-- [RKE 集群配置](rancher-server-configuration/rke1-cluster-configuration.md)
-- [RKE2 集群配置](rancher-server-configuration/rke2-cluster-configuration.md)
-- [K3s 集群配置](rancher-server-configuration/k3s-cluster-configuration.md)
-- [EKS 集群配置](rancher-server-configuration/eks-cluster-configuration.md)
-- [GKE 集群配置](gke-cluster-configuration.md)
-- [AKS 集群配置](rancher-server-configuration/aks-cluster-configuration.md)
+- [RKE 集群配置](./rancher-server-configuration/rke1-cluster-configuration.md)
+- [RKE2 集群配置](./rancher-server-configuration/rke2-cluster-configuration.md)
+- [K3s 集群配置](./rancher-server-configuration/k3s-cluster-configuration.md)
+- [EKS 集群配置](./rancher-server-configuration/eks-cluster-configuration.md)
+- [GKE 集群配置](./rancher-server-configuration/gke-cluster-configuration/gke-cluster-configuration.md)
+- [AKS 集群配置](./rancher-server-configuration/aks-cluster-configuration.md)
-### 不同类型集群的管理功能
+## 不同类型集群的管理功能
对于已有集群而言,可提供的选项和设置取决于你配置集群的方法。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/downstream-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/downstream-cluster-configuration.md
index 591285c378e..70e32b32c13 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/downstream-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/downstream-cluster-configuration.md
@@ -6,4 +6,4 @@ title: 下游集群配置
-以下文档将讨论[节点模板配置](./node-template-configuration.md)和[主机配置](./machine-configuration.md)。
+以下文档将讨论[节点模板配置](./node-template-configuration/node-template-configuration.md)和[主机配置](./machine-configuration/machine-configuration.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md
index 98e6b500737..656430783c5 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md
@@ -4,6 +4,8 @@ title: EC2 主机配置参考
有关 EC2 和节点的更多详细信息,请参阅 [EC2 管理控制台](https://aws.amazon.com/ec2)的官方文档。
+## Machine Pools
+
### 区域
构建集群的地理[区域](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md
index 6ea2549ccf8..6abf01e5e2e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md
@@ -4,6 +4,8 @@ title: Azure 主机配置
有关 Azure 的更多信息,请参阅官方 [Azure 文档](https://docs.microsoft.com/en-us/azure/?product=featured)。
+## Machine Pools
+
### 环境
Microsoft 提供了多个[云](https://docs.microsoft.com/en-us/cli/azure/cloud?view=azure-cli-latest)来满足地区法律的要求:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md
index 6fdddebab84..16cb5d9f12f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md
@@ -4,6 +4,8 @@ title: DigitalOcean 主机配置
有关 DigitalOcean、Droplet 的更多详细信息,请参阅[官方文档](https://docs.digitalocean.com/products/compute/)。
+## Machine Pools
+
### 区域
配置创建 Droplet 的[区域](https://docs.digitalocean.com/glossary/region/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md
index 56082ed497a..5f222f19325 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md
@@ -4,11 +4,11 @@ title: EC2 节点模板配置
有关 EC2 和节点的更多详细信息,请参阅 [EC2 管理控制台](https://aws.amazon.com/ec2)的官方文档。
-### 区域
+## 区域
在**区域**字段中,选择创建云凭证时使用的同一区域。
-### 云凭证
+## 云凭证
你的 AWS 账户访问信息,存储在[云凭证](../../../user-settings/manage-cloud-credentials.md)中。
@@ -21,14 +21,14 @@ title: EC2 节点模板配置
参阅下面的三个示例 JSON 策略:
- [IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#iam-策略示例)
-- [带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
+- [带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)(如果要使用 [Kubernetes 云提供商](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md),或将 IAM 配置文件传递给实例,则需要)
- [允许用户加密 EBS 卷的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#允许加密-ebs-卷的-iam-策略示例)
-### 验证和配置节点
+## 验证和配置节点
为集群选择可用区和网络设置。
-### 安全组
+## 安全组
选择默认安全组或配置安全组。
@@ -36,12 +36,12 @@ title: EC2 节点模板配置
如果你自行为 EC2 实例提供安全组,Rancher 不会对其进行修改。因此,你需要让你的安全组允许 [Rancher 配置实例所需的端口](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rke-上-rancher-server-节点的端口)。有关使用安全组控制 EC2 实例的入站和出站流量的更多信息,请参阅[这里](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#WorkingWithSecurityGroups)。
-### 实例选项
+## 实例选项
配置要创建的实例。确保为 AMI 配置正确的 **SSH 用户**。所选的区域可能不支持默认实例类型。在这种情况下,你必须选择一个确实存在的实例类型。否则将出现错误,表示请求的配置不受支持。
-如果需要传递 **IAM 示例配置名称**(不是 ARN),例如要使用 [Kubernetes 云提供商](../../../../pages-for-subheaders/set-up-cloud-providers.md)时,策略则需要其他权限。有关示例策略,请参阅[带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)。
+如果需要传递 **IAM 示例配置名称**(不是 ARN),例如要使用 [Kubernetes 云提供商](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)时,策略则需要其他权限。有关示例策略,请参阅[带有 PassRole 的 IAM 策略示例](../../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#带有-passrole-的-iam-策略示例)。
-### 引擎选项
+## 引擎选项
在节点模板的**引擎选项**中,你可以配置容器 daemon。你可能需要指定容器版本或容器镜像仓库 Mirror。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
index 4c00d381982..141c2973e0f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md
@@ -4,11 +4,11 @@ title: DigitalOcean 节点模板配置
账户访问信息存储在云凭证中。云凭证存储在 Kubernetes 密文中。多个节点模板可以使用相同的云凭证。你可以使用现有的云凭证或创建新的凭证。
-### Droplet 选项
+## Droplet 选项
**Droplet 选项**用于配置集群的地理区域和规范。
-### Docker Daemon
+## Docker Daemon
如果你使用 Docker,[Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) 配置选项包括:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
index a296187be6b..d617a0d6497 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md
@@ -13,7 +13,7 @@ title: AKS 集群配置参考
在 Rancher UI 中配置 AKS 集群时,无法禁用 RBAC。如果在 AKS 中为集群禁用了 RBAC,则无法在 Rancher 中注册或导入集群。
-Rancher 可以使用与其他集群一样的方式为 AKS 集群配置成员角色。有关详细信息,请参阅 [RBAC](../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
+Rancher 可以使用与其他集群一样的方式为 AKS 集群配置成员角色。有关详细信息,请参阅 [RBAC](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
## 云凭证
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md
index 5de68eb41ae..be8365d0724 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md
@@ -2,7 +2,7 @@
title: EKS 集群配置参考
---
-### 账号访问
+## 账号访问
使用获取的信息为 IAM 策略填写每个下拉列表和字段:
@@ -11,7 +11,7 @@ title: EKS 集群配置参考
| 区域 | 从下拉列表中选择构建集群的地理区域。 |
| 云凭证 | 选择为 IAM 策略创建的云凭证。有关在 Rancher 中创建云凭证的更多信息,请参阅[此页面](../../user-settings/manage-cloud-credentials.md)。 |
-### 服务角色
+## 服务角色
选择一个[服务角色](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html)。
@@ -20,15 +20,15 @@ title: EKS 集群配置参考
| Standard:Rancher 生成的服务角色 | 如果选择此角色,Rancher 会自动添加一个服务角色以供集群使用。 |
| 自定义:从现有的服务角色中选择 | 如果选择此角色,Rancher 将允许你从已在 AWS 中创建的服务角色中进行选择。有关在 AWS 中创建自定义服务角色的更多信息,请参阅 [Amazon 文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role)。 |
-### 密文加密
+## 密文加密
可选:要加密密文,请选择或输入在 [AWS 密钥管理服务 (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) 中创建的密钥。
-### API Server 端点访问
+## API Server 端点访问
配置公共/私有 API 访问是一个高级用例。有关详细信息,请参阅 [EKS 集群端点访问控制文档](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)。
-### 专用 API 端点
+## 专用 API 端点
如果你在创建集群时启用了私有 API 端点访问,并禁用了公共 API 端点访问,那么你必须进行额外的步骤才能使 Rancher 成功连接到集群。在这种情况下,一个弹窗将会显示,其中包含需要在要注册到 Rancher 的集群上运行的命令。配置集群后,你可以在任何能连接到集群的 Kubernetes API 的地方运行显示的命令。
@@ -36,7 +36,7 @@ title: EKS 集群配置参考
- 在创建集群时,创建具有私有和公共 API 端点访问权限的集群。在集群创建并处于 active 状态后,你可以禁用公共访问,Rancher 将能继续与 EKS 集群通信。
- 确保 Rancher 与 EKS 集群共享同一个子网。然后,你可以使用安全组使 Rancher 能够与集群的 API 端点进行通信。在这种情况下,你不需要运行注册集群的命令,Rancher 就能够与你的集群通信。有关配置安全组的更多信息,请参阅[安全组文档](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html)。
-### 公共访问端点
+## 公共访问端点
你也可以选择通过显式 CIDR 块来限制对公共端点的访问。
@@ -48,7 +48,7 @@ title: EKS 集群配置参考
有关对集群端点的公共和私有访问的更多信息,请参阅 [Amazon EKS 文档](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html)。
-### 子网
+## 子网
| 选项 | 描述 |
| ------- | ------------ |
@@ -60,7 +60,7 @@ title: EKS 集群配置参考
- [什么是 Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html)
- [VPC 和子网](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)
-### 安全组
+## 安全组
Amazon 文档:
@@ -68,7 +68,7 @@ Amazon 文档:
- [VPC 的安全组](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html)
- [创建安全组](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group)
-### Logging
+## Logging
将 controlplane 日志配置为发送到 Amazon CloudWatch。如果你将集群日志发送到 CloudWatch Logs,你需要按照 standard CloudWatch Logs 支付数据引入和存储费用。
@@ -76,13 +76,13 @@ Amazon 文档:
有关 EKS controlplane 日志管理的更多信息,请参阅[官方文档](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)。
-### 托管节点组
+## 托管节点组
Amazon EKS 托管的节点组自动为 Amazon EKS Kubernetes 集群的节点(Amazon EC2 实例)进行预置和生命周期管理。
有关节点组如何工作以及如何配置的更多信息,请参阅 [EKS 文档](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)。
-#### 使用你自己的启动模板
+### 使用你自己的启动模板
你可以提供启动模板 ID 和版本,以便轻松配置节点组中的 EC2 实例。如果你提供了启动模板,则以下设置都无法在 Rancher 中进行配置。因此,如果你使用启动模板,则需要在启动模板中指定以下列表中的所有必须和所需的设置。另请注意,如果提供了启动模板 ID 和版本,则只能更新模板版本。如果要使用新模板 ID,则需要创建新的托管节点组。
@@ -95,11 +95,11 @@ Amazon EKS 托管的节点组自动为 Amazon EKS Kubernetes 集群的节点(A
| 用户数据 | [MIME 多部分格式](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)的 Cloud init 脚本。 | 选填 |
| 实例资源标签 | 标记节点组中的每个 EC2 实例。 | 选填 |
-#### Rancher 管理的启动模板
+### Rancher 管理的启动模板
如果你不指定启动模板,你将能够在 Rancher UI 中配置上述选项,并且可以在创建后更新所有这些选项。为了利用所有这些选项,Rancher 将为你创建和管理启动模板。Rancher 中的所有集群都将有一个 Rancher 管理的启动模板。此外,每个没有指定启动模板的托管节点组都将具有一个管理的启动模板版本。此启动模板的名称将具有 “rancher-managed-lt-” 前缀,后面是集群的显示名称。此外,Rancher 管理的启动模板将使用 “rancher-managed-template” 键和 “do-not-modify-or-delete” 值来进行标记,以将其识别为 Rancher 管理的启动模板。请注意,不要修改或删除此启动模板,或将此启动模板与其他集群或托管节点组一起使用。因为这可能会使你的节点组“降级”并需要销毁和重新创建。
-#### 自定义 AMI
+### 自定义 AMI
如果你在启动模板或 Rancher 中指定了自定义 AMI,则必须[正确配置](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/)镜像,并且必须提供用户数据以[引导节点](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami)。这是一个高级用例,因此你必须要了解其要求。
@@ -111,7 +111,7 @@ Amazon EKS 托管的节点组自动为 Amazon EKS Kubernetes 集群的节点(A
:::
-#### Spot 实例
+### Spot 实例
Spot 实例现在[受 EKS 支持](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot)。如果你指定了启动模板,Amazon 建议不要在模板中提供实例类型。相反,Amazon 建议提供多种实例类型。如果你为节点组启用了“请求 Spot 实例”复选框,那么你将有机会提供多种实例类型。
@@ -121,7 +121,7 @@ Spot 实例现在[受 EKS 支持](https://docs.aws.amazon.com/eks/latest/usergui
:::
-#### 节点组设置
+### 节点组设置
以下设置也是可配置的。在创建节点组后,除“节点组名称”外的所有选项都是可编辑的。
@@ -135,7 +135,7 @@ Spot 实例现在[受 EKS 支持](https://docs.aws.amazon.com/eks/latest/usergui
| Tags | 管理的节点组的标签,这些标签不会传播到任何相关资源。 |
-### 配置刷新间隔
+## 配置刷新间隔
`eks-refresh-cron` 设置已弃用。它已迁移到 `eks-refresh` 设置,这是一个表示秒的整数。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
index fa562ae9cee..6cb63287e57 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md
@@ -4,11 +4,11 @@ title: 私有集群
在 GKE 中,[私有集群](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept)是一种集群,其节点仅通过分配内部 IP 地址与入站和出站流量相隔离。GKE 中的私有集群可以选择将 controlplane 端点作为公开访问的地址或作为私有地址。这与其他 Kubernetes 提供商不同,后者可能将具有私有 controlplane 端点的集群称为“私有集群”,但仍允许进出节点的流量。基于你的组织的网络和安全要求,你可能想创建一个有私有节点的集群,其中有或没有公共 controlplane 端点。从 Rancher 配置的 GKE 集群可以通过在**集群选项**中选择**私有集群**(在**显示高级选项**下)来使用隔离的节点。通过选择**启用私有端点**,可以选择将 controlplane 端点设为私有。
-### 私有节点
+## 私有节点
由于私有集群中的节点只有内部 IP 地址,它们将无法安装 cluster agent,Rancher 将无法完全管理集群。这可以通过几种方式来处理。
-#### Cloud NAT
+### Cloud NAT
:::caution
@@ -18,7 +18,7 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。
如果限制外出的互联网访问对你的组织来说不是一个问题,可以使用 Google 的 [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) 服务来允许私有网络中的节点访问互联网,使它们能够从 Dockerhub 下载所需的镜像并与 Rancher management server 通信。这是最简单的解决方案。
-#### 私有镜像仓库
+### 私有镜像仓库
:::caution
@@ -26,13 +26,13 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。
:::
-如果要求限制节点的传入和传出流量,请按照离线安装说明,在集群所在的 VPC 上设置一个私有容器[镜像仓库](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md),从而允许集群节点访问和下载运行 cluster agent 所需的镜像。如果 controlplane 端点也是私有的,Rancher 将需要[直接访问](#直接访问)它。
+如果要求限制节点的传入和传出流量,请按照离线安装说明,在集群所在的 VPC 上设置一个私有容器[镜像仓库](../../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md),从而允许集群节点访问和下载运行 cluster agent 所需的镜像。如果 controlplane 端点也是私有的,Rancher 将需要[直接访问](#直接访问)它。
-### 私有 controlplane 端点
+## 私有 controlplane 端点
如果集群暴露了公共端点,Rancher 将能够访问集群,且无需执行额外的步骤。但是,如果集群没有公共端点,则必须确保 Rancher 可以访问集群。
-#### Cloud NAT
+### Cloud NAT
:::caution
@@ -42,7 +42,7 @@ Cloud NAT 将[产生费用](https://cloud.google.com/nat/pricing)。
如上所述,如果不考虑限制对节点的传出互联网访问,则可以使用 Google 的 [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) 服务来允许节点访问互联网。当集群进行配置时,Rancher 将提供一个在集群上运行的注册命令。下载新集群的 [kubeconfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl) 并在集群上运行提供的 kubectl 命令。如果要通过获取集群访问权来运行此命令,你可以创建临时节点或使用 VPC 中的现有节点,或者登录到某个集群节点或使用某个集群节点创建 SSH 隧道。
-#### 直接访问
+### 直接访问
如果 Rancher server 与集群的 controlplane 运行在同一 VPC 上,它将直接访问 controlplane 的私有端点。集群节点将需要访问[私有镜像仓库](#私有镜像仓库)以下载上述的镜像。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md
index 7cdc1ec7c64..6d4f2525bed 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md
@@ -2,7 +2,7 @@
title: RKE 集群配置参考
---
-Rancher 安装 Kubernetes 时,它使用 [RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 或 [RKE2](https://docs.rke2.io/) 作为 Kubernetes 发行版。
+Rancher 安装 Kubernetes 时,它使用 [RKE](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 或 [RKE2](https://docs.rke2.io/) 作为 Kubernetes 发行版。
本文介绍 Rancher 中可用于新的或现有的 RKE Kubernetes 集群的配置选项。
@@ -16,7 +16,7 @@ Rancher 安装 Kubernetes 时,它使用 [RKE](../../../pages-for-subheaders/la
RKE 集群配置选项嵌套在 `rancher_kubernetes_engine_config` 参数下。有关详细信息,请参阅[集群配置文件](#rke-集群配置文件参考)。
-在 [RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中,你可以编辑任何后续剩余的选项。
+在 [RKE 启动的集群](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)中,你可以编辑任何后续剩余的选项。
有关 RKE 配置文件语法的示例,请参阅 [RKE 文档](https://rancher.com/docs/rke/latest/en/example-yamls/)。
@@ -88,7 +88,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
### Kubernetes 云提供商
-你可以配置 [Kubernetes 云提供商](../../../pages-for-subheaders/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
+你可以配置 [Kubernetes 云提供商](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
:::note
@@ -131,7 +131,7 @@ Rancher v2.6 引入了[为 RKE 集群配置 ECR 镜像仓库](https://rancher.co
### 节点池
-有关使用 Rancher UI 在 RKE 集群中设置节点池的信息,请参阅[此页面](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)。
+有关使用 Rancher UI 在 RKE 集群中设置节点池的信息,请参阅[此页面](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)。
### NGINX Ingress
@@ -325,7 +325,7 @@ windows_prefered_cluster: false
### enable_cluster_monitoring
-启用或禁用[集群监控](../../../pages-for-subheaders/monitoring-and-alerting.md)的选项。
+启用或禁用[集群监控](../../../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)的选项。
### enable_network_policy
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
index 3f4328512ae..7a872f77ea2 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md
@@ -110,13 +110,13 @@ Rancher 与以下开箱即用的网络提供商兼容:
所有 CNI 网络插件都支持[双栈](https://docs.rke2.io/install/network_options#dual-stack-configuration)网络。要在双栈模式下配置 RKE2,请为你的[集群 CIDR](#集群-cidr) 和/或 [Service CIDR](#service-cidr) 设置有效的 IPv4/IPv6 CIDR。
-###### 额外配置 {#dual-stack-additional-config}
+###### 额外配置
使用 `cilium` 或 `multus,cilium` 作为容器网络接口提供商时,请确保**启用 IPv6 支持**选项。
#### 云提供商
-你可以配置 [Kubernetes 云提供商](../../../pages-for-subheaders/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
+你可以配置 [Kubernetes 云提供商](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/set-up-cloud-providers.md)。如果你想在 Kubernetes 中使用动态配置的[卷和存储](../../../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md),你通常需要选择特定的云提供商。例如,如果你想使用 Amazon EBS,则需要选择 `aws` 云提供商。
:::note
@@ -130,7 +130,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
#### Worker CIS 配置文件
-选择一个 [CIS benchmark](../../../pages-for-subheaders/cis-scan-guides.md) 来验证系统配置。
+选择一个 [CIS benchmark](../../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md) 来验证系统配置。
#### 项目网络隔离
@@ -140,13 +140,13 @@ Rancher 与以下开箱即用的网络提供商兼容:
#### CoreDNS
-默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking#coredns)。
+默认情况下,[CoreDNS](https://coredns.io/) 会安装为默认 DNS 提供程序。如果未安装 CoreDNS,则必须自己安装备用 DNS 提供程序。有关其他 CoreDNS 配置,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#coredns)。
#### NGINX Ingress
-如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+如果你想使用高可用性配置来发布应用,并且你使用没有原生负载均衡功能的云提供商来托管主机,请启用此选项以在集群中使用 NGINX Ingress。有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
-有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking#nginx-ingress-controller)。
+有关其他配置选项,请参阅 [RKE2 文档](https://docs.rke2.io/networking/networking_services#nginx-ingress-controller)。
#### Metrics Server
@@ -182,7 +182,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
要配置[双栈](https://docs.rke2.io/install/network_options#dual-stack-configuration)模式,请输入有效的 IPv4/IPv6 CIDR。例如 `10.42.0.0/16,2001:cafe:42:0::/56`。
-使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#dual-stack-additional-config)。
+使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#额外配置)。
#### Service CIDR
@@ -192,7 +192,7 @@ Rancher 与以下开箱即用的网络提供商兼容:
要配置[双栈](https://docs.rke2.io/install/network_options#dual-stack-configuration)模式,请输入有效的 IPv4/IPv6 CIDR。例如 `10.42.0.0/16,2001:cafe:42:0::/56`。
-使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#dual-stack-additional-config)。
+使用 `cilium` 或 `multus,cilium` 作为[容器网络](#容器网络提供商)接口提供商时,你需要进行[附加配置](#额外配置)。
#### 集群 DNS
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md
index 823aaa2de81..af2782a81b3 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md
@@ -8,7 +8,7 @@ title: 同步
如果你同时处理来自另一个来源的更新,你可能会不小心覆盖一个来源的状态。如果你在完成一个来源的更新后 5 分钟内处理另一个来源的更新,也可能会发生这种情况。
:::
-### 工作原理
+## 工作原理
要理解同步是如何工作的,则必须理解 Rancher Cluster 对象上的两个字段:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md
index c301e408d1f..895c75d5c0e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md
@@ -2,7 +2,7 @@
title: Rancher Agent 选项
---
-Rancher 在每个节点上部署一个 Agent 来与节点通信。本文描述了可以传递给 Agent 的选项。要使用这些选项,你需要[使用自定义节点创建集群](../../../../pages-for-subheaders/use-existing-nodes.md),并在添加节点时将选项添加到生成的 `docker run` 命令。
+Rancher 在每个节点上部署一个 Agent 来与节点通信。本文描述了可以传递给 Agent 的选项。要使用这些选项,你需要[使用自定义节点创建集群](./use-existing-nodes.md),并在添加节点时将选项添加到生成的 `docker run` 命令。
有关 Rancher 如何使用 Node Agent 与下游集群通信的概述,请参阅[产品架构](../../../rancher-manager-architecture/communicating-with-downstream-user-clusters.md#3-node-agents)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
index c4e1efd5335..3a33851ca92 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md
@@ -17,7 +17,7 @@ description: 要创建具有自定义节点的集群,你需要访问集群中
:::note 使用 Windows 主机作为 Kubernetes Worker 节点?
-在开始之前,请参阅[配置 Windows 自定义集群](use-windows-clusters.md)。
+在开始之前,请参阅[配置 Windows 自定义集群](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md)。
:::
@@ -137,5 +137,5 @@ Key=kubernetes.io/cluster/CLUSTERID, Value=shared
创建集群后,你可以通过 Rancher UI 访问集群。最佳实践建议你设置以下访问集群的备用方式:
-- **通过 kubectl CLI 访问你的集群**:按照[这些步骤](../../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-from-your-workstation)在你的工作站上使用 kubectl 访问集群。在这种情况下,你将通过 Rancher Server 的认证代理进行认证,然后 Rancher 会让你连接到下游集群。此方法允许你在没有 Rancher UI 的情况下管理集群。
-- **通过 kubectl CLI 使用授权的集群端点访问你的集群**:按照[这些步骤](../../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster)直接使用 kubectl 访问集群,而无需通过 Rancher 进行认证。我们建议设置此替代方法来访问集群,以便在无法连接到 Rancher 时访问集群。
+- **通过 kubectl CLI 访问你的集群**:按照[这些步骤](../../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#在工作站使用-kubectl-访问集群)在你的工作站上使用 kubectl 访问集群。在这种情况下,你将通过 Rancher Server 的认证代理进行认证,然后 Rancher 会让你连接到下游集群。此方法允许你在没有 Rancher UI 的情况下管理集群。
+- **通过 kubectl CLI 使用授权的集群端点访问你的集群**:按照[这些步骤](../../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)直接使用 kubectl 访问集群,而无需通过 Rancher 进行认证。我们建议设置此替代方法来访问集群,以便在无法连接到 Rancher 时访问集群。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/kubernetes-concepts.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/kubernetes-concepts.md
index 17a1bc2b969..5666ba5f4fc 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/kubernetes-concepts.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/kubernetes-concepts.md
@@ -53,7 +53,7 @@ controlplane 节点上运行 Kubernetes API server、scheduler 和 Controller Ma
- **Kubelets**:监控节点状态的 Agent,确保你的容器处于健康状态。
- **工作负载**:承载应用和其他 deployment 的容器和 Pod。
-Worker 节点也运行存储和网络驱动,有必要时也会运行 Ingress Controller。你可以根据需要,创建尽可能多的 worker 节点来运行你的[工作负载](../pages-for-subheaders/workloads-and-pods.md)。
+Worker 节点也运行存储和网络驱动,有必要时也会运行 Ingress Controller。你可以根据需要,创建尽可能多的 worker 节点来运行你的[工作负载](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/workloads-and-pods.md)。
## 关于 Helm
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md
index 98169f63376..ed044a40a65 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/examples.md
@@ -2,15 +2,15 @@
title: 示例
---
-### ServiceMonitor
+## ServiceMonitor
你可以在[此处](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)找到 ServiceMonitor 自定义资源的示例。
-### PodMonitor
+## PodMonitor
你可以在[此处](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml)找到 PodMonitor 示例,还可以在[此处](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml)找到引用它的 Prometheus 资源示例。
-### PrometheusRule
+## PrometheusRule
PrometheusRule 包含你通常放置在 [Prometheus 规则文件](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)中的告警和记录规则。
@@ -18,6 +18,6 @@ PrometheusRule 包含你通常放置在 [Prometheus 规则文件](https://promet
你可以在[此页面](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md)找到 PrometheusRule 示例。
-### Alertmanager 配置
+## Alertmanager 配置
有关示例配置,请参阅[本节](./receivers.md#alertmanager-配置示例)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
index e514d60a928..b7377b610b1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md
@@ -14,7 +14,7 @@ ServiceMonitor 比 PodMonitor 更常用,推荐用于大多数用例。
:::
-### ServiceMonitor
+## ServiceMonitor
这个伪 CRD 映射到 Prometheus 自定义资源配置的一部分。它以声明方式指定应如何监控 Kubernetes 服务组。
@@ -24,7 +24,7 @@ ServiceMonitor 比 PodMonitor 更常用,推荐用于大多数用例。
有关 ServiceMonitor 工作原理的更多信息,请参阅 [Prometheus Operator 文档](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md)。
-### PodMonitor
+## PodMonitor
这个伪 CRD 映射到 Prometheus 自定义资源配置的一部分。它以声明方式指定应如何监控 Pod 组。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/prometheus-federator/rbac.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/prometheus-federator/rbac.md
index a519b5a8cbc..c7aea4be4be 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/prometheus-federator/rbac.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/prometheus-federator/rbac.md
@@ -4,7 +4,7 @@ title: RBAC
本文介绍 Prometheus Federator RBAC。
-如[命名空间](../../pages-for-subheaders/prometheus-federator.md#命名空间)部分所述,Prometheus Federator 期望集群中具有项目级别权限(例如,具有由单个标签选择器确定的命名空间组的权限)的项目所有者、项目成员和其他用户,除了项目 Registration 命名空间(默认导入到项目中)和那些已经包含其项目的命名空间之外,在任何其他命名空间中都只有最低权限。因此,为了让项目所有者将特定 Chart 权限分配给其项目命名空间中的其他用户,Helm Project Operator 将自动监视以下绑定:
+如[命名空间](./prometheus-federator.md#命名空间)部分所述,Prometheus Federator 期望集群中具有项目级别权限(例如,具有由单个标签选择器确定的命名空间组的权限)的项目所有者、项目成员和其他用户,除了项目 Registration 命名空间(默认导入到项目中)和那些已经包含其项目的命名空间之外,在任何其他命名空间中都只有最低权限。因此,为了让项目所有者将特定 Chart 权限分配给其项目命名空间中的其他用户,Helm Project Operator 将自动监视以下绑定:
- ClusterRoleBindings
- 项目发布命名空间中的 RoleBindings
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-cluster-tools.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-cluster-tools.md
index 7f1ed5206c7..c3717e78452 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-cluster-tools.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-cluster-tools.md
@@ -17,7 +17,7 @@ Logging 支持:
Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
-有关详细信息,请参阅 [Logging 文档](../pages-for-subheaders/logging.md)。
+有关详细信息,请参阅 [Logging 文档](../integrations-in-rancher/logging/logging.md)。
## 监控和告警
你可以使用 Rancher,通过业界领先并开源的 [Prometheus](https://prometheus.io/) 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。
@@ -28,7 +28,7 @@ Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
告警是触发这些通知的规则。在接收告警之前,你必须在 Rancher 中配置一个或多个通知器。你可以在集群或项目级别设置告警范围。
-如需更多信息,请参阅[监控文档](../pages-for-subheaders/monitoring-and-alerting.md)。
+如需更多信息,请参阅[监控文档](../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)。
## Istio
@@ -36,7 +36,7 @@ Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
Rancher v2.5 改进了与 Istio 的集成。
-如需更多信息,请参阅 [Istio 文档](../pages-for-subheaders/istio.md)。
+如需更多信息,请参阅 [Istio 文档](../integrations-in-rancher/istio/istio.md)。
## OPA Gatekeeper
[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) 是一个开源项目,它对 OPA 和 Kubernetes 进行了集成,以通过许可控制器 Webhook 提供策略控制。有关如何在 Rancher 中启用 Gatekeeper 的详细信息,请参阅 [OPA Gatekeeper](../integrations-in-rancher/opa-gatekeeper.md)。
@@ -45,4 +45,4 @@ Rancher v2.5 改进了与 Istio 的集成。
Rancher 可以通过运行安全扫描来检查 Kubernetes 是否按照 CIS Kubernetes Benchmark 中定义的安全最佳实践进行部署。
-如需更多信息,请参阅 [CIS 扫描文档](../pages-for-subheaders/cis-scan-guides.md)。
\ No newline at end of file
+如需更多信息,请参阅 [CIS 扫描文档](../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/architecture-recommendations.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/architecture-recommendations.md
index 10ae10563b0..7e61817fbe6 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/architecture-recommendations.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/architecture-recommendations.md
@@ -53,7 +53,7 @@ title: 架构推荐
我们强烈建议你把 Rancher 安装到托管在云提供商(如 AWS EC2 和 Google Compute Engine(GCE)等)上的 Kubernetes 集群上。
-为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
+为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
## Kubernetes 安装的推荐节点角色
@@ -95,7 +95,7 @@ RKE 每个角色至少需要一个节点,但并不强制每个节点只能有
由于 Rancher Server 集群中没有部署其他工作负载,因此在大多数情况下,这个集群都不需要使用我们出于可扩展性和可用性的考虑,而为下游集群推荐的架构。
-有关下游集群的最佳实践,请查看[生产环境清单](../../pages-for-subheaders/checklist-for-production-ready-clusters.md)或[最佳实践](../../pages-for-subheaders/best-practices.md)。
+有关下游集群的最佳实践,请查看[生产环境清单](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md)或[最佳实践](../best-practices/best-practices.md)。
## 授权集群端点架构
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
index 56800763102..9d7570ba151 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md
@@ -17,6 +17,8 @@ title: 与下游集群通信
3. [Node Agents](#3-node-agents)
4. [授权集群端点](#4-授权集群端点)
+## Components
+
### 1. 认证代理
在此图中,名为 Bob 的用户希望查看在名为 User Cluster 1 的下游集群上运行的所有 Pod。在 Rancher 中,他可以运行 `kubectl` 命令来查看
@@ -58,7 +60,7 @@ Cluster Agent,也叫做 `cattle-cluster-agent`,是运行在下游集群中
授权集群端点(ACE)可连接到下游集群的 Kubernetes API Server,而不用通过 Rancher 认证代理调度请求。
-> 授权集群端点仅适用于 Rancher 启动的 Kubernetes 集群,即只适用于 Rancher [使用 RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 来配置的集群。它不适用于导入的集群,也不适用于托管在 Kubernetes 提供商中的集群(例如 Amazon 的 EKS)。
+> 授权集群端点仅适用于 Rancher 启动的 Kubernetes 集群,即只适用于 Rancher [使用 RKE](../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 来配置的集群。它不适用于导入的集群,也不适用于托管在 Kubernetes 提供商中的集群(例如 Amazon 的 EKS)。
授权集群端点的主要用途:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/rancher-server-and-components.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/rancher-server-and-components.md
index b715f4be1e4..ef25d5ea4c1 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/rancher-server-and-components.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-manager-architecture/rancher-server-and-components.md
@@ -6,9 +6,9 @@ title: Rancher Server 和 Components
下图展示了 Rancher 2.x 的上层架构。下图中,Rancher Server 管理两个下游 Kubernetes 集群,其中一个由 RKE 创建,另一个由 Amazon EKS 创建。
-为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
+为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 创建一个专用的 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
-下图介绍了用户如何通过 Rancher 的认证代理管理 [Rancher 启动的 Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) 集群和[托管的 Kubernetes](../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) 集群:
+下图介绍了用户如何通过 Rancher 的认证代理管理 [Rancher 启动的 Kubernetes](../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 集群和[托管的 Kubernetes](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md) 集群:
通过 Rancher 的认证代理管理 Kubernetes 集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-project-tools.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-project-tools.md
index ad05d4166e0..fd99a83952e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-project-tools.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-project-tools.md
@@ -25,8 +25,8 @@ Logging 支持:
Rancher 可以与 Elasticsearch、splunk、kafka、syslog 和 fluentd 集成。
-有关详细信息,请参阅 [Logging](../pages-for-subheaders/logging.md)。
+有关详细信息,请参阅 [Logging](../integrations-in-rancher/logging/logging.md)。
## Monitoring
-你可以使用 Rancher,通过业界领先并开源的 [Prometheus](https://prometheus.io/) 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。有关详细信息,请参阅 [Monitoring](../pages-for-subheaders/monitoring-and-alerting.md)。
+你可以使用 Rancher,通过业界领先并开源的 [Prometheus](https://prometheus.io/) 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。有关详细信息,请参阅 [Monitoring](../integrations-in-rancher/monitoring-and-alerting/monitoring-and-alerting.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
index d40edd47e96..d3c891e156c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-hardening-guide.md
@@ -102,7 +102,7 @@ Rancher 提供了 `rancher-restricted` 模板,用于强制执行高度限制
-K3s v1.24 及更早版本支持 [Pod 安全策略 (PSP)](https://v1-24.docs.kubernetes.io/docs/concepts/security/pod-security-policy/) 以控制 Pod 安全性。
+K3s v1.24 及更早版本支持 [Pod 安全策略 (PSP)](https://github.com/kubernetes/website/blob/release-1.24/content/en/docs/concepts/security/pod-security-policy.md) 以控制 Pod 安全性。
你可以在 Rancher 中通过集群配置,传递以下标志来启用 PSPs:
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/kubernetes-security-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/kubernetes-security-best-practices.md
index af9746e899f..5b53a6b0467 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/kubernetes-security-best-practices.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/kubernetes-security-best-practices.md
@@ -2,7 +2,7 @@
title: Kubernetes 安全最佳实践
---
-### 限制云元数据 API 访问
+## 限制云元数据 API 访问
AWS、Azure、DigitalOcean 或 GCP 等云提供商通常会在本地向实例公开元数据服务。默认情况下,此端点可被运行在云实例上的 pod 访问,包括在托管的 Kubernetes(如 EKS、AKS、DigitalOcean Kubernetes 或 GKE)中的 pod,并且可以包含该节点的云凭证、配置数据(如 kubelet 凭证)以及其他敏感数据。为了降低在云平台上运行的这种风险,请遵循 [Kubernetes 安全建议](https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster/#restricting-cloud-metadata-api-access),即限制授予实例凭证的权限,使用网络策略限制 pod 对元数据 API 的访问,并避免使用配置数据来传递密文。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security-best-practices.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security-best-practices.md
index 0d98495ffec..9b13fb3e842 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security-best-practices.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security-best-practices.md
@@ -6,7 +6,7 @@ title: Rancher 安全最佳实践
-### 限制对 /version 和 /rancherversion 的公共访问
+## 限制对 /version 和 /rancherversion 的公共访问
上游(本地) Rancher 实例提供正在运行的 Rancher 版本和用于构建它的 Go 版本信息。这些信息可以通过 `/version` 路径访问,该路径用于诸如自动化版本升级或确认部署成功等任务。上游实例还提供了可通过 `/rancherversion` 路径访问的 Rancher 版本信息。
@@ -14,7 +14,7 @@ title: Rancher 安全最佳实践
更多关于保护服务器的详细信息,请参阅 [OWASP Web Application Security Testing - Enumerate Infrastructure and Application Admin Interfaces](https://owasp.org/www-project-web-security-testing-guide/stable/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/05-Enumerate_Infrastructure_and_Application_Admin_Interfaces.html)。
-### 会话管理
+## 会话管理
某些环境可能需要额外的安全控制来管理会话。例如,你可能希望限制用户的并发活动会话或限制可以从哪些地理位置发起这些会话。Rancher 默认情况下不支持这些功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security.md
index 935aaa2b780..85c1e15e37c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.8/reference-guides/rancher-security/rancher-security.md
@@ -27,11 +27,11 @@ title: 安全
本文介绍了安全相关的文档以及资源,让你的 Rancher 安装和下游 Kubernetes 集群更加安全。
-### NeuVector 与 Rancher 的集成
+## NeuVector 与 Rancher 的集成
NeuVector 是一个开源的、以容器为中心的安全应用程序,现已集成到 Rancher 中。NeuVector 提供生产安全、DevOps 漏洞保护和容器防火墙等功能。请参阅 [Rancher 文档](../../integrations-in-rancher/neuvector/neuvector.md) 和 [NeuVector 文档](https://open-docs.neuvector.com/)了解更多信息。
-### 在 Kubernetes 集群上运行 CIS 安全扫描
+## 在 Kubernetes 集群上运行 CIS 安全扫描
Rancher 使用 [kube-bench](https://github.com/aquasecurity/kube-bench) 来运行安全扫描,从而检查 Kubernetes 是否按照 [CIS](https://www.cisecurity.org/cis-benchmarks/)(Center for Internet Security,互联网安全中心)Kubernetes Benchmark 中定义的安全最佳实践进行部署。
@@ -47,13 +47,13 @@ Rancher 在集群上运行 CIS 安全扫描时会生成一份报告,该报告
有关详细信息,请参阅[安全扫描](../../how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md)。
-### SELinux RPM
+## SELinux RPM
[安全增强型 Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) 是对 Linux 的安全增强。被政府机构使用之后,SELinux 已成为行业标准,并在 CentOS 7 和 8 上默认启用。
我们提供了 `rancher-selinux` 和 `rke2-selinux` 两个 RPM(Red Hat 软件包),让 Rancher 产品能够在 SELinux 主机上正常运行。有关详细信息,请参阅[此页面](selinux-rpm/selinux-rpm.md)。
-### Rancher 加固指南
+## Rancher 加固指南
Rancher 加固指南基于 CIS Kubernetes Benchmark 。
@@ -63,7 +63,7 @@ Rancher 加固指南基于
-
-import {Redirect} from '@docusaurus/router';
-
-const Home = () => {
-return ;
-};
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/api-reference.mdx b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/api-reference.mdx
new file mode 100644
index 00000000000..df4dc6e4f93
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/api-reference.mdx
@@ -0,0 +1,17 @@
+---
+title: API 参考
+---
+
+
+
+
+
+:::note
+
+目前,并非所有的 Rancher 资源都可以通过 Rancher Kubernetes API 操作。
+
+:::
+
+import ApiDocMdx from '@theme/ApiDocMdx';
+
+
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/quickstart.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/quickstart.md
new file mode 100644
index 00000000000..20ca51cc34f
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/quickstart.md
@@ -0,0 +1,152 @@
+---
+title: API 快速入门指南
+---
+
+
+
+
+
+你可以通过 Kubernetes API 访问 Rancher 的资源。本指南将帮助你以 Rancher 用户的身份开始使用此 API。
+
+1. 在左上角,点击 **☰ > 全局设置**。
+2. 找到 `server-url` 字段并复制其地址。
+3. [创建](../reference-guides/user-settings/api-keys.md#创建-api-密钥)一个没有作用域的 Rancher API 密钥。
+
+ :::danger
+
+ 没有作用域的 Rancher API 密钥授予用户可以访问的所有资源的无限制的访问权限。为防止未经授权的使用,此密钥应安全存储并经常轮换。
+
+ :::
+
+4. 创建一个 `kubeconfig.yaml` 文件,将 `$SERVER_URL` 替换成上面从全局设置中复制的地址,并且将 `$API_KEY` 替换为上面创建的 Rancher API 密钥:
+
+ ```yaml
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - name: "rancher"
+ cluster:
+ server: "$SERVER_URL"
+
+ users:
+ - name: "rancher"
+ user:
+ token: "$API_KEY"
+
+ contexts:
+ - name: "rancher"
+ context:
+ user: "rancher"
+ cluster: "rancher"
+
+ current-context: "rancher"
+ ```
+
+你可以使用任何兼容的工具来引用这个文件,例如 kubectl 或 [client-go](https://github.com/kubernetes/client-go)。快速演示内容请参阅 [kubectl 示例](#api-kubectl-示例)。
+
+更多有关处理更复杂证书的设置信息,请参阅[指定 CA 证书](#指定-ca-证书)。
+
+更多关于可用的 kubeconfig 选项,请参阅[上游文档](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)。
+
+## API kubectl 示例
+
+在此示例中,我们将展示如何使用 kubectl 创建一个项目,然后删除它。关于其他可用的 Rancher 资源列表,请参阅 [API 参考](./api-reference.mdx)。
+
+:::note
+
+目前,并非所有的 Rancher 资源都可以通过 Rancher Kubernetes API 操作。
+
+:::
+
+1. 将 KUBECONFIG 环境变量设置为刚才创建的 kubeconfig 文件:
+
+ ```bash
+ export KUBECONFIG=$(pwd)/kubeconfig.yaml
+ ```
+
+2. 使用 `kubectl explain` 查看项目的可用字段,或者复杂资源的子字段:
+
+ ```bash
+ kubectl explain projects
+ kubectl explain projects.spec
+ ```
+
+不是所有的资源都有详细的输出。
+
+3. 在名称为 `project.yaml` 的文件中添加以下内容:
+
+ ```yaml
+ apiVersion: management.cattle.io/v3
+ kind: Project
+ metadata:
+ # name 应在每个集群的所有项目中都是唯一的
+ name: p-abc123
+ # generateName 可以替代 `name` 来随机生成一个名称
+ # generateName: p-
+ # namespace 应与 spec.ClusterName 匹配
+ namespace: local
+ spec:
+ # clusterName 应与目标集群的 `metadata.Name` 匹配
+ clusterName: local
+ description: Example Project
+ # displayName 是人类可读的名称并且从 UI 中显示
+ displayName: Example
+ ```
+
+4. 创建项目:
+
+ ```bash
+ kubectl create -f project.yaml
+ ```
+
+5. 删除项目:
+
+ 项目删除的方式取决于项目名称的创建方式。
+
+ **A. 如果在创建项目时使用 `name`**:
+
+ ```bash
+ kubectl delete -f project.yaml
+ ```
+
+ **B. 如果你使用 `generateName`**:
+
+ 将 `$PROJECT_NAME` 替换为 kubectl 创建项目后随机生成的项目名称。
+
+ ```bash
+ kubectl delete project $PROJECT_NAME -n local
+ ```
+
+## 指定 CA 证书
+
+为确保你的工具能够识别 Rancher 的 CA 证书,大多数设置都需要对上述模板进行额外修改。
+
+1. 在左上角点击 **☰ > 全局设置**。
+2. 查找并复制 `ca-certs` 字段中的值。
+3. 将复制的值保存在名称为 `rancher.crt` 的文件中。
+
+ :::note
+ 如果你的 Rancher 实例由其他服务代理,你必须提取该服务正在使用的证书,并将其添加到 kubeconfig 文件中,如步骤 5 所示。
+ :::
+
+4. 以下命令会将 `rancher.crt` 转换为 base64 输出,除去所有换行符,并使用证书内容更新 kubeconfig 中的 cluster 选项,然后删除 `rancher.crt` 文件:
+
+ ```bash
+ export KUBECONFIG=$PATH_TO_RANCHER_KUBECONFIG
+ kubectl config set clusters.rancher.certificate-authority-data $(cat rancher.crt | base64 -i - | tr -d '\n')
+ rm rancher.crt
+ ```
+5. (可选项)如果你使用不受系统信任的自签名证书,则可以通过 kubectl 在 kubeconfig 中设置不安全选项:
+
+ :::danger
+
+ 此选项不应该在生产环境中使用,因为它存在安全风险。
+
+ :::
+
+ ```bash
+ export KUBECONFIG=$PATH_TO_RANCHER_KUBECONFIG
+ kubectl config set clusters.rancher.insecure-skip-tls-verify true
+ ```
+
+ 如果你的 Rancher 实例由其他服务代理,你必须提取该服务正在使用的证书,并如上面演示的方法,将其添加到 kubeconfig 文件中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/workflows/projects.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/workflows/projects.md
new file mode 100644
index 00000000000..27cd6e5c2a9
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/api/workflows/projects.md
@@ -0,0 +1,135 @@
+---
+title: 项目
+---
+
+
+
+
+
+## 创建项目
+
+项目资源只能在管理集群上创建,请参考下文了解如何[在管理集群中的项目下创建命名空间](#在项目中创建命名空间)
+
+### 创建一个基本项目
+
+```bash
+kubectl create -f - <:`
+
+## 删除项目
+
+在集群命名空间中查找要删除的项目:
+
+```bash
+kubectl --namespace c-m-abcde get projects
+```
+
+删除集群命名空间下的项目:
+
+```bash
+kubectl --namespace c-m-abcde delete project p-vwxyz
+```
+
+请注意此命令不会删除以前属于该项目的命名空间和资源。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/contribute-to-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/contribute-to-rancher.md
new file mode 100644
index 00000000000..baff7b735b9
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/contribute-to-rancher.md
@@ -0,0 +1,136 @@
+---
+title: 参与 Rancher 社区贡献
+---
+
+本文介绍了 Rancher 仓库和 Rancher 文档、如何构建 Rancher 仓库以及提交 issue 时要包含哪些信息。
+
+有关如何为 Rancher 项目开发做出贡献的更多详细信息,请参阅 [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki)。Wiki 包含以下主题的资源:
+
+- 如何搭建 Rancher 开发环境并运行测试
+- Issue 在开发生命周期中的典型流程
+- 编码指南和开发最佳实践
+- 调试和故障排除
+- 开发 Rancher API
+
+在 Rancher Users Slack 上,开发者的频道是 **#developer**。
+
+## Rancher 文档
+
+如果你对此网站上的文档有建议,请在主 [Rancher 文档](https://github.com/rancher/rancher-docs)仓库中[提交 issue](https://github.com/rancher/rancher-docs/issues/new/choose)。此仓库包含 Rancher v2.0 及更高版本的文档。
+
+有关贡献和构建 Rancher v2.x 文档仓库的更多详细信息,请参阅 [Rancher 文档 README](https://github.com/rancher/rancher-docs#readme)。
+
+有关 Rancher v1.6 及更早版本的文档,请参阅 [Rancher 1.x docs](https://github.com/rancher/rancher.github.io) 仓库,其中包含 https://rancher.com/docs/rancher/v1.6/en/ 的源文件。
+
+## Rancher 仓库
+
+所有仓库都位于我们的主要 GitHub 组织内。Rancher 使用了很多仓库,以下是部分主要仓库的描述:
+
+| 仓库 | URL | 描述 |
+-----------|-----|-------------
+| Rancher | https://github.com/rancher/rancher | Rancher 2.x 的主要源码仓库。 |
+| Types | https://github.com/rancher/types | 包含 Rancher 2.x 的所有 API 类型的仓库。 |
+| API Framework | https://github.com/rancher/norman | API 框架,用于构建由 Kubernetes 自定义资源支持的 Rancher 风格的 API。 |
+| User Interface | https://github.com/rancher/dashboard/ | Dashboard UI 源码仓库。 |
+| (Rancher) Docker Machine | https://github.com/rancher/machine | 使用主机驱动时使用的 Docker Machine 二进制文件的源码仓库。这是 `docker/machine` 仓库的一个 fork。 |
+| machine-package | https://github.com/rancher/machine-package | 用于构建 Rancher Docker Machine 二进制文件。 |
+| kontainer-engine | https://github.com/rancher/kontainer-engine | kontainer-engine 的源码仓库,它是配置托管 Kubernetes 集群的工具。 |
+| RKE repository | https://github.com/rancher/rke | Rancher Kubernetes Engine 的源码仓库,该工具可在任何主机上配置 Kubernetes 集群。 |
+| CLI | https://github.com/rancher/cli | Rancher 2.x 中使用的 Rancher CLI 的源码仓库。 |
+| (Rancher) Helm repository | https://github.com/rancher/helm | 打包的 Helm 二进制文件的源码仓库。这是 `helm/helm` 仓库的一个 fork。 |
+| Telemetry repository | https://github.com/rancher/telemetry | Telemetry 二进制文件的源码仓库。 |
+| loglevel repository | https://github.com/rancher/loglevel | loglevel 二进制文件的源码仓库,用于动态更改日志级别。 |
+
+要查看 Rancher 使用的所有库/项目,请查看 `rancher/rancher` 仓库中的 [`go.mod` 文件](https://github.com/rancher/rancher/blob/master/go.mod)。
+
+
+用于配置/管理 Kubernetes 集群的 Rancher 组件。
+
+### 构建 Rancher 仓库
+
+每个仓库都应该有一个 Makefile,并且可以使用 `make` 命令进行构建。`make` 目标基于仓库中 `/scripts` 目录中的脚本,每个目标都使用 [Dapper](https://github.com/rancher/dapper) 在孤立的环境中运行。`Dockerfile.dapper` 将用于此操作,它包含了所需的所有构建工具。
+
+默认目标是 `ci`,它将运行 `./scripts/validate`、`./scripts/build`、`./scripts/test` 和 `./scripts/package`。生成的二进制文件将在 `./build/bin` 中,通常也打包在 Docker 镜像中。
+
+### Rancher Bug、Issue 或疑问
+
+如果你发现任何 bug 或问题,由于有人可能遇到过同样的问题,或者我们已经正在寻找解决方案,因此请先在[已报告 issue](https://github.com/rancher/rancher/issues) 中搜索。
+
+如果找不到与你的问题相关的内容,请通过[提出 issue](https://github.com/rancher/rancher/issues/new) 与我们联系。与 Rancher 相关的仓库有很多,但请将 issue 提交到 Rancher 仓库中,这样能确保我们能看到这些 issue。如果你想就一个用例提出问题或询问其他用户,你可以在 [Rancher 论坛](https://forums.rancher.com)上发帖。
+
+#### 提交 Issue 的检查清单
+
+提交问题时请遵循此清单,以便我们调查和解决问题。如果你能提供更多信息,我们就可以使用更多数据来确定导致问题的原因或发现更多相关的内容。
+
+:::note
+
+如果数据量很大,请使用 [GitHub Gist](https://gist.github.com/) 或类似工具,并在 issue 中链接你创建的资源。
+
+:::
+
+:::note 重要提示:
+
+请删除所有敏感数据。
+
+:::
+
+- **资源**:请尽量详细地提供所使用的资源。导致问题的原因可能很多,因此请尽量提供更多细节来帮助我们确定根本原因。下面是一些参考示例:
+ - **主机**:主机的规格(例如 CPU/内存/磁盘),运行在什么云厂商上,使用的 Amazon Machine Image,使用的 DigitalOcean droplet,配置的镜像(复现时用于重新构建或使用)。
+ - **操作系统**:使用的是什么操作系统。在此处提供详细信息,例如 `cat /etc/os-release` 的输出(确切的操作系统版本)和 `uname -r` 的输出(确切的内核)。
+ - **Docker**:使用的 Docker 版本以及安装的方法。Docker 的大部分详情都可以在 `docker version` 和 `docker info` 的输出中找到。
+ - **环境**:是否使用了代理,是否使用可信的 CA/自签名证书,是否使用了外部负载均衡器。
+ - **Rancher**:使用的 Rancher 版本,可以在 UI 左下角或者从主机运行的 image 标签中获取。
+ - **集群**:创建了什么样的集群,如何创建的,在创建时指定了什么参数。
+- **复现 issue 的步骤**:尽量详细地说明你是如何触发所报告的情况的。这有助于复现你的情况。
+ - 提供从创建到你报告的情况使用的手动步骤或自动化脚本。
+- **日志**:提供使用资源的数据/日志。
+ - Rancher
+ - Docker 安装
+
+ ```
+ docker logs \
+ --timestamps \
+ $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }')
+ ```
+ - 使用 `kubectl` 的 Kubernetes 安装
+
+ :::note
+
+ 确保你配置了正确的 kubeconfig(例如,如果 Rancher 安装在 Kubernetes 集群上,则 `export KUBECONFIG=$PWD/kube_config_cluster.yml`)或通过 UI 使用了嵌入式 kubectl。
+
+ :::
+
+ ```
+ kubectl -n cattle-system \
+ logs \
+ -l app=rancher \
+ --timestamps=true
+ ```
+ - 在 RKE 集群的每个节点上使用 `docker` 的 Docker 安装
+
+ ```
+ docker logs \
+ --timestamps \
+ $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }')
+ ```
+ - 使用 RKE 附加组件的 Kubernetes 安装
+
+ :::note
+
+ 确保你配置了正确的 kubeconfig(例如,如果 Rancher Server 安装在 Kubernetes 集群上,则 `export KUBECONFIG=$PWD/kube_config_cluster.yml`)或通过 UI 使用了嵌入式 kubectl。
+
+ :::
+
+ ```
+ kubectl -n cattle-system \
+ logs \
+ --timestamps=true \
+ -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name')
+ ```
+ - 系统日志记录(可能不存在,取决于操作系统)
+ - `/var/log/messages`
+ - `/var/log/syslog`
+ - `/var/log/kern.log`
+ - Docker Daemon 日志记录(可能并不全部存在,取决于操作系统)
+ - `/var/log/docker.log`
+- **指标**:如果你遇到性能问题,请提供尽可能多的指标数据(文件或屏幕截图)来帮助我们确定问题。如果你遇到主机相关的问题,你可以提供 `top`、`free -m`、`df` 的输出,这些输出会显示进程/内存/磁盘的使用情况。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/container-network-interface-providers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/container-network-interface-providers.md
new file mode 100644
index 00000000000..ad796bb5c4c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/container-network-interface-providers.md
@@ -0,0 +1,204 @@
+---
+title: CNI 网络插件
+description: 了解容器网络接口 (CNI)、Rancher 提供的 CNI 网络插件、提供商的功能,以及如何选择网络提供商
+---
+
+## 什么是 CNI?
+
+CNI(容器网络接口)是一个[云原生计算基金会项目](https://cncf.io/),它包含了一些规范和库,用于编写在 Linux 容器中配置网络接口的一系列插件。CNI 只关注容器的网络连接,并在容器被删除时移除所分配的资源。
+
+Kubernetes 使用 CNI 作为网络提供商和 Kubernetes Pod 网络之间的接口。
+
+
+
+有关更多信息,请访问 [CNI GitHub 项目](https://github.com/containernetworking/cni)。
+
+## CNI 使用了哪些网络模型?
+
+CNI 网络插件使用封装网络模型(例如 Virtual Extensible Lan,缩写是 [VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan))或非封装网络模型(例如 Border Gateway Protocol,缩写是 [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol))来实现网络结构。
+
+### 什么是封装网络?
+
+此网络模型提供了一个逻辑二层(L2)网络,该网络封装在跨 Kubernetes 集群节点的现有三层(L3)网络拓扑上。使用此模型,你可以为容器提供一个隔离的 L2 网络,而无需分发路由。封装网络带来了少量的处理开销以及由于覆盖封装生成 IP header 造成的 IP 包大小增加。封装信息由 Kubernetes worker 之间的 UDP 端口分发,交换如何访问 MAC 地址的网络控制平面信息。此类网络模型中常用的封装是 VXLAN、Internet 协议安全性 (IPSec) 和 IP-in-IP。
+
+简单来说,这种网络模型在 Kubernetes worker 之间生成了一种扩展网桥,其中连接了 pod。
+
+如果你偏向使用扩展 L2 网桥,则可以选择此网络模型。此网络模型对 Kubernetes worker 的 L3 网络延迟很敏感。如果数据中心位于不同的地理位置,请确保它们之间的延迟较低,以避免最终的网络分段。
+
+使用这种网络模型的 CNI 网络插件包括 Flannel、Canal、Weave 和 Cilium。默认情况下,Calico 不会使用此模型,但你可以对其进行配置。
+
+
+
+### 什么是非封装网络?
+
+该网络模型提供了一个 L3 网络,用于在容器之间路由数据包。此模型不会生成隔离的 L2 网络,也不会产生开销。这些好处的代价是,Kubernetes worker 必须管理所需的所有路由分发。该网络模型不使用 IP header 进行封装,而是使用 Kubernetes Worker 之间的网络协议来分发路由信息以实现 Pod 连接,例如 [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)。
+
+简而言之,这种网络模型在 Kubernetes worker 之间生成了一种扩展网络路由器,提供了如何连接 Pod 的信息。
+
+如果你偏向使用 L3 网络,则可以选择此网络模型。此模型在操作系统级别为 Kubernetes Worker 动态更新路由。对延迟较不敏感。
+
+使用这种网络模型的 CNI 网络插件包括 Calico 和 Cilium。Cilium 可以使用此模型进行配置,即使这不是默认模式。
+
+
+
+## Rancher 提供哪些 CNI 插件?
+
+### RKE Kubernetes 集群
+
+Rancher 开箱即用地为 RKE Kubernetes 集群提供了几个 CNI 网络插件,分别是 Canal、Flannel、Calico 和 Weave。
+
+如果你使用 Rancher 创建新的 Kubernetes 集群,你可以选择你的 CNI 网络插件。
+
+#### Canal
+
+
+
+Canal 是一个 CNI 网络插件,它很好地结合了 Flannel 和 Calico 的优点。它让你轻松地将 Calico 和 Flannel 网络部署为统一的网络解决方案,将 Calico 的网络策略执行与 Calico(未封装)和 Flannel(封装)丰富的网络连接选项结合起来。
+
+Canal 是 Rancher 默认的 CNI 网络插件,并采用了 Flannel 和 VXLAN 封装。
+
+Kubernetes Worker 需要打开 UDP 端口 `8472` (VXLAN) 和 TCP 端口 `9099`(健康检查)。如果使用 Wireguard,则需要打开 UDP 端口 `51820` 和 `51821`。有关详细信息,请参阅[下游集群的端口要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md)。
+
+
+
+有关详细信息,请参阅 [Canal GitHub 页面](https://github.com/projectcalico/canal)。
+
+#### Flannel
+
+
+
+Flannel 是为 Kubernetes 配置 L3 网络结构的简单方法。Flannel 在每台主机上运行一个名为 flanneld 的二进制 Agent,该 Agent 负责从更大的预配置地址空间中为每台主机分配子网租约。Flannel 通过 Kubernetes API 或直接使用 etcd 来存储网络配置、分配的子网、以及其他辅助数据(例如主机的公共 IP)。数据包使用某种后端机制来转发,默认封装为 [VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan)。
+
+默认情况下,封装的流量是不加密的。Flannel 提供了两种加密方案:
+
+* [IPSec](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#ipsec):使用 [strongSwan](https://www.strongswan.org/) 在 Kubernetes worker 之间建立加密的 IPSec 隧道。它是加密的实验性后端。
+* [WireGuard](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard):比 strongSwan 更快的替代方案。
+
+Kubernetes Worker 需要打开 UDP 端口 `8472` (VXLAN)。有关详细信息,请参阅[下游集群的端口要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#网络要求)。
+
+
+
+有关详细信息,请参阅 [Flannel GitHub 页面](https://github.com/flannel-io/flannel)。
+
+#### Weave
+
+
+
+Weave 在云上的 Kubernetes 集群中启用网络和网络策略。此外,它还支持加密对等节点之间的流量。
+
+Kubernetes worker 需要打开 TCP 端口 `6783`(控制端口)、UDP 端口 `6783` 和 UDP 端口 `6784`(数据端口)。有关详细信息,请参阅[下游集群的端口要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#网络要求)。
+
+有关详细信息,请参阅以下页面:
+
+- [Weave Net 官网](https://www.weave.works/)
+
+### RKE2 Kubernetes 集群
+
+Rancher 开箱即用地为 RKE2 Kubernetes 集群提供了几个 CNI 网络插件,分别是 [Canal](#canal)(见上一节)、Calico 和 Cilium。
+
+如果你使用 Rancher 创建新的 Kubernetes 集群,你可以选择你的 CNI 网络插件。
+
+#### Calico
+
+
+
+Calico 在云上的 Kubernetes 集群中启用网络和网络策略。默认情况下,Calico 使用纯净、未封装的 IP 网络结构和策略引擎为 Kubernetes 工作负载提供网络。工作负载能够使用 BGP 在云上和本地进行通信。
+
+Calico 还提供了一种无状态的 IP-in-IP 或 VXLAN 封装模式。如果需要,你可以使用它。Calico 还支持策略隔离,让你使用高级 ingress 和 egress 策略保护和管理 Kubernetes 工作负载。
+
+如果使用 BGP,Kubernetes Worker 需要打开 TCP 端口 `179`,如果使用 VXLAN 封装,则需要打开 UDP 端口 `4789`。另外,使用 Typha 时需要 TCP 端口 `5473`。有关详细信息,请参阅[下游集群的端口要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#网络要求)。
+
+:::note 重要提示:
+
+在 Rancher 2.6.3 中,安装 RKE2 时 Calico 探测(probe)在 Windows 节点上会失败。请注意,此问题已在 v2.6.4 中解决。
+
+- 要解决此问题,请先导航到 `https://<Rancher-Server-URL>/v3/settings/windows-rke2-install-script`。
+
+- 在那里,将当前设置 `https://raw.githubusercontent.com/rancher/wins/v0.1.3/install.ps1` 更改为新设置 `https://raw.githubusercontent.com/rancher/rke2/master/windows/rke2-install.ps1`。
+
+:::
+
+
+
+有关详细信息,请参阅以下页面:
+
+- [Project Calico 官方网站](https://www.projectcalico.org/)
+- [Calico 项目 GitHub 页面](https://github.com/projectcalico/calico)
+
+#### Cilium
+
+
+
+Cilium 在 Kubernetes 中启用网络和网络策略(L3、L4 和 L7)。默认情况下,Cilium 使用 eBPF 技术在节点内部路由数据包,并使用 VXLAN 将数据包发送到其他节点。你也可以配置非封装的技术。
+
+Cilium 推荐大于 5.2 的内核版本,从而充分利用 eBPF 的能力。Kubernetes worker 需要打开 TCP 端口 `8472`(VXLAN)和 TCP 端口 `4240`(健康检查)。此外,还必须为健康检查启用 ICMP 8/0。有关详细信息,请查看 [Cilium 系统要求](https://docs.cilium.io/en/latest/operations/system_requirements/#firewall-requirements)。
+
+##### Cilium 中跨节点的 Ingress 路由
+
+默认情况下,Cilium 不允许 Pod 与其他节点上的 Pod 通信。要解决此问题,请启用 Ingress Controller 以使用 “CiliumNetworkPolicy” 进行跨节点路由请求。
+
+选择 Cilium CNI 并为新集群启用项目网络隔离后,配置如下:
+
+```
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+ name: hn-nodes
+ namespace: default
+spec:
+ endpointSelector: {}
+ ingress:
+ - fromEntities:
+ - remote-node
+```
+
+## 各个网络插件的 CNI 功能
+
+下表总结了 Rancher 中每个 CNI 网络插件支持的不同功能:
+
+| 提供商 | 网络模型 | 路线分发 | 网络策略 | 网格 | 外部数据存储 | 加密 | Ingress/Egress 策略 |
+| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- |
+| Canal | 封装 (VXLAN) | 否 | 是 | 否 | K8s API | 是 | 是 |
+| Flannel | 封装 (VXLAN) | 否 | 否 | 否 | K8s API | 是 | 否 |
+| Calico | 封装(VXLAN,IPIP)或未封装 | 是 | 是 | 是 | Etcd 和 K8s API | 是 | 是 |
+| Weave | 封装 | 是 | 是 | 是 | 否 | 是 | 是 |
+| Cilium | 封装 (VXLAN) | 是 | 是 | 是 | Etcd 和 K8s API | 是 | 是 |
+
+- 网络模型:封装或未封装。如需更多信息,请参阅 [CNI 中使用的网络模型](#cni-使用了哪些网络模型)。
+
+- 路由分发:一种外部网关协议,用于在互联网上交换路由和可达性信息。BGP 可以帮助进行跨集群 pod 之间的网络。此功能对于未封装的 CNI 网络插件是必须的,并且通常由 BGP 完成。如果你想构建跨网段拆分的集群,路由分发是一个很好的功能。
+
+- 网络策略:Kubernetes 提供了强制执行规则的功能,这些规则决定了哪些 service 可以使用网络策略进行相互通信。这是从 Kubernetes 1.7 起稳定的功能,可以与某些网络插件一起使用。
+
+- 网格:允许在不同的 Kubernetes 集群间进行 service 之间的网络通信。
+
+- 外部数据存储:具有此功能的 CNI 网络插件需要一个外部数据存储来存储数据。
+
+- 加密:允许加密和安全的网络控制和数据平面。
+
+- Ingress/Egress 策略:允许你管理 Kubernetes 和非 Kubernetes 通信的路由控制。
+
+
+## CNI 社区人气
+
+下表总结了不同的 GitHub 指标,让你了解每个项目的受欢迎程度和活动。数据收集于 2022 年 1 月。
+
+| 提供商 | 项目 | Stars | Forks | Contributors |
+| ---- | ---- | ---- | ---- | ---- |
+| Canal | https://github.com/projectcalico/canal | 679 | 100 | 21 |
+| Flannel | https://github.com/flannel-io/flannel | 7k | 2.5k | 185 |
+| Calico | https://github.com/projectcalico/calico | 3.1k | 741 | 224 |
+| Weave | https://github.com/weaveworks/weave/ | 6.2k | 635 | 84 |
+| Cilium | https://github.com/cilium/cilium | 10.6k | 1.3k | 352 |
+
+
+
+## 使用哪个 CNI 插件?
+
+这取决于你的项目需求。各个提供商都有不同的功能和选项。没有一个提供商可以满足所有用户的需求。
+
+Canal 是默认的 CNI 网络插件。对于大多数用例,我们推荐你使用它。它使用 Flannel 为容器提供封装网络,同时添加 Calico 网络策略,可以在网络方面提供项目/命名空间隔离。
+
+## 如何配置 CNI 网络插件?
+
+如需了解如何为你的集群配置网络插件,请参阅[集群选项](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。有关更高级的配置选项,请参阅有关使用[配置文件](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)和[网络插件](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/)选项来配置集群的说明。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/deprecated-features-in-v2.5.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/deprecated-features-in-v2.5.md
new file mode 100644
index 00000000000..ec1898663b2
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/deprecated-features-in-v2.5.md
@@ -0,0 +1,26 @@
+---
+title: Rancher 弃用的功能
+---
+
+### Rancher 的弃用策略是什么?
+
+我们在支持[服务条款](https://rancher.com/support-maintenance-terms)中发布了官方弃用策略。
+
+### 在哪里可以找到 Rancher 已弃用的功能?
+
+Rancher 会在 GitHub 上的[发行说明](https://github.com/rancher/rancher/releases)中公布已弃用的功能。请参阅以下补丁版本了解已弃用的功能:
+
+| 补丁版本 | 发布日期 |
+|---------------|---------------|
+| [2.6.0](https://github.com/rancher/rancher/releases/tag/v2.6.0) | 2021 年 8 月 31 日 |
+| [2.6.1](https://github.com/rancher/rancher/releases/tag/v2.6.1) | 2021 年 10 月 11 日 |
+| [2.6.2](https://github.com/rancher/rancher/releases/tag/v2.6.2) | 2021 年 10 月 19 日 |
+| [2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) | 2021 年 12 月 21 日 |
+| [2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | 2022 年 3 月 31 日 |
+| [2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) | 2022 年 5 月 12 日 |
+| [2.6.6](https://github.com/rancher/rancher/releases/tag/v2.6.6) | 2022 年 6 月 30 日 |
+
+
+### 如果某个功能标记为弃用,我要怎么做?
+
+如果某个发行版将某功能标记为"Deprecated"(已弃用),该功能仍然可用并受支持,从而允许用户按照常规流程进行升级。在升级到该功能被标记为"已删除"的发行版前,用户/管理员应该计划剥离该功能。对于新部署,我们建议不要使用已弃用的功能。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/deprecated-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/deprecated-features.md
new file mode 100644
index 00000000000..b594bd0a42b
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/deprecated-features.md
@@ -0,0 +1,26 @@
+---
+title: Rancher 中已弃用的功能
+---
+
+
+
+
+
+### Rancher 的弃用策略是什么?
+
+我们已经在支持的[服务条款](https://rancher.com/support-maintenance-terms)中发布了官方的弃用策略。
+
+### 在哪里可以了解 Rancher 中已弃用哪些功能?
+
+Rancher 将在 GitHub 上发布的 Rancher 的[发版说明](https://github.com/rancher/rancher/releases)中发布已弃用的功能。有关已弃用的功能,请参阅以下的补丁版本:
+
+| Patch 版本 | 发布时间 |
+| --------------------------------------------------------------- | ------------------ |
+| [2.8.3](https://github.com/rancher/rancher/releases/tag/v2.8.3) | 2024 年 3 月 28 日 |
+| [2.8.2](https://github.com/rancher/rancher/releases/tag/v2.8.2) | 2024 年 2 月 8 日 |
+| [2.8.1](https://github.com/rancher/rancher/releases/tag/v2.8.1) | 2024 年 1 月 22 日 |
+| [2.8.0](https://github.com/rancher/rancher/releases/tag/v2.8.0) | 2023 年 12 月 6 日 |
+
+### 当一个功能被标记为弃用我可以得到什么样的预期?
+
+当功能被标记为“已弃用”时,它依然可用并得到支持,允许按照常规的流程进行升级。一旦升级完成,用户/管理员应开始计划在升级到标记为已移除的版本之前放弃使用已弃用的功能。对于新的部署,建议不要使用已弃用的功能。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/dockershim.md
new file mode 100644
index 00000000000..d1dc4c19136
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/dockershim.md
@@ -0,0 +1,45 @@
+---
+title: Dockershim
+---
+
+Dockershim 是 Kubelet 和 Docker Daemon 之间的 CRI 兼容层。Kubernetes 1.20 版本宣布了[移除树内 Dockershim](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/)。目前计划在 Kubernetes 1.24 中移除。有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
+
+从 Kubernetes 1.21 开始,RKE 集群支持外部 Dockershim,来让用户继续使用 Docker 作为 CRI 运行时。现在,我们通过使用 [Mirantis 和 Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) 来确保 RKE 集群可以继续使用 Docker,从而实现上游开源社区的 Dockershim。
+
+要启用外部 Dockershim,配置以下选项:
+
+```
+enable_cri_dockerd: true
+```
+
+如果你想使用其他容器运行时,Rancher 也提供使用 Containerd 作为默认运行时的,以边缘为中心的 K3s,和以数据中心为中心的 RKE2 Kubernetes 发行版。即使在 Kubernetes 1.24 删除了树内 Dockershim 之后,你也可以通过 Rancher 升级和管理导入的 RKE2 和 K3s Kubernetes 集群。
+
+### 常见问题
+
+
+
+Q: 如果要获得 Rancher 对上游 Dockershim 的支持,我需要升级 Rancher 吗?
+
+A: 对于 RKE,Dockershim 的上游支持从 Kubernetes 1.21 开始。你需要使用 Rancher 2.6 或更高版本才能获取使用 Kubernetes 1.21 的 RKE 的支持。详情请参阅我们的[支持矩阵](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/)。
+
+
+
+Q: 我目前的 RKE 使用 Kubernetes 1.20。为了避免出现不再支持 Dockershim 的情况,我是否需要尽早将 RKE 升级到 Kubernetes 1.21?
+
+A: 在使用 Kubernetes 1.20 的 RKE 中,Dockershim 版本依然可用,而且在 Kubernetes 1.24 之前不会在上游弃用。Kubernetes 会发出弃用 Dockershim 的警告,而 Rancher 在使用 Kubernetes 1.21 的 RKE 中已经缓解了这个问题。你可以按照计划正常升级到 Kubernetes 1.21,但也应该考虑在升级到 Kubernetes 1.22 时启用外部 Dockershim。在升级到 Kubernetes 1.24 之前,你需要启用外部 Dockershim,此时现有的实现都会被删除。
+
+有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
+
+
+
+Q: 如果我不想再依赖 Dockershim,我还有什么选择?
+
+A: 你可以为 Kubernetes 使用不需要 Dockershim 支持的运行时,如 Containerd。RKE2 和 K3s 就是其中的两个选项。
+
+
+
+Q: 如果我目前使用 RKE1,但想切换到 RKE2,我可以怎样进行迁移?
+
+A: Rancher 也在探索就地升级路径的可能性。此外,你始终可以使用 kubectl 将工作负载迁移到另一个集群。
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/general-faq.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/general-faq.md
new file mode 100644
index 00000000000..5cf116534af
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/general-faq.md
@@ -0,0 +1,51 @@
+---
+title: 一般常见问题解答
+---
+
+
+
+
+
+本文包含了用户常见的 Rancher 2.x 问题。
+
+有关常见技术问题,请参阅[常见技术问题解答](technical-items.md)。
+
+## Rancher 2.x 支持 Docker Swarm 和 Mesos 作为环境类型吗?
+
+如果你在 Rancher 2.x 中创建环境,Swarm 和 Mesos 将不再是可选的标准选项。但是,Swarm 和 Mesos 还能继续作为可以部署的商店应用程序。这是一个艰难的决定,但这是大势所趋。比如说,15,000 多个集群可能只有大约 200 个在运行 Swarm。
+
+## 是否可以使用 Rancher 2.x 管理 Azure Kubernetes 服务?
+
+是的。请参阅我们的[集群管理](../how-to-guides/new-user-guides/manage-clusters/manage-clusters.md)指南,了解 AKS 上可用的 Rancher 功能,以及相关的 [AKS 的文档](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)。
+
+## Rancher 是否支持 Windows?
+
+Rancher 支持 Windows Server 1809 容器。有关如何使用 Windows Worker 节点设置集群的详细信息,请参阅[为 Windows 配置自定义集群](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md)。
+
+## Rancher 是否支持 Istio?
+
+Rancher 支持 [Istio](../pages-for-subheaders/istio.md)。
+
+## Rancher 2.x 是否支持使用 Hashicorp 的 Vault 来存储密文?
+
+密文管理已在我们的 roadmap 上,但我们尚未将该功能分配给特定版本。
+
+## Rancher 2.x 是否也支持 RKT 容器?
+
+目前,我们只支持 Docker。
+
+## Rancher 2.x 是否支持将 Calico、Contiv、Contrail、Flannel、Weave net 等网络插件用于嵌入和已注册的 Kubernetes?
+
+Rancher 开箱即用地为 Kubernetes 集群提供了几个 CNI 网络插件,分别是 Canal、Flannel、Calico 和 Weave。有关官方支持的详细信息,请参阅 [Rancher 支持矩阵](https://rancher.com/support-maintenance-terms/)。
+
+## Rancher 是否计划支持 Traefik?
+
+目前,我们不打算提供嵌入式 Traefik 支持,但我们仍在探索负载均衡方案。
+
+## 我可以将 OpenShift Kubernetes 集群导入 2.x 吗?
+
+我们的目标是运行任何上游 Kubernetes 集群。因此,Rancher 2.x 应该可以与 OpenShift 一起使用,但我们尚未对此进行测试。
+
+## Rancher 会集成 Longhorn 吗?
+
+是的。Longhorn 已集成到 Rancher 2.5+ 中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/install-and-configure-kubectl.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/install-and-configure-kubectl.md
new file mode 100644
index 00000000000..21c301639b6
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/install-and-configure-kubectl.md
@@ -0,0 +1,29 @@
+---
+title: 安装和配置 kubectl
+---
+
+`kubectl` 是一个 CLI 工具,用于运行 Kubernetes 集群相关的命令。Rancher 2.x 中的许多维护和管理任务都需要它。
+
+### 安装
+
+请参阅 [kubectl 安装](https://kubernetes.io/docs/tasks/tools/install-kubectl/)将 kubectl 安装到你的操作系统上。
+
+### 配置
+
+使用 RKE 创建 Kubernetes 集群时,RKE 会在本地目录中创建一个 `kube_config_cluster.yml`,该文件包含使用 `kubectl` 或 `helm` 等工具连接到新集群的凭证。
+
+你可以将此文件复制为 `$HOME/.kube/config`。如果你使用多个 Kubernetes 集群,将 `KUBECONFIG` 环境变量设置为 `kube_config_cluster.yml` 的路径:
+
+```
+export KUBECONFIG=$(pwd)/kube_config_cluster.yml
+```
+
+使用 `kubectl` 测试你的连接性,并查看你是否可以获取节点列表:
+
+```
+kubectl get nodes
+ NAME STATUS ROLES AGE VERSION
+165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1
+165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1
+165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/rancher-is-no-longer-needed.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/rancher-is-no-longer-needed.md
new file mode 100644
index 00000000000..ffb98927c69
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/rancher-is-no-longer-needed.md
@@ -0,0 +1,61 @@
+---
+title: 卸载 Rancher
+---
+
+本文介绍了如果你不再需要 Rancher、不想再由 Rancher 管理集群、或想删除 Rancher Server 需要怎么做。
+
+
+### 如果 Rancher Server 被删除,下游集群中的工作负载会怎样?
+
+如果 Rancher 删除了或无法恢复,Rancher 管理的下游 Kubernetes 集群中的所有工作负载将继续正常运行。
+
+### 如果删除了 Rancher Server,该如何访问下游集群?
+
+如果删除了 Rancher,访问下游集群的方式取决于集群的类型和集群的创建方式。总而言之:
+
+- **注册集群**:集群不受影响,你可以使用注册集群前的方法访问该集群。
+- **托管的 Kubernetes 集群**:如果你在 Kubernetes 云提供商(例如 EKS、GKE 或 AKS)中创建集群,你可以继续使用提供商的云凭证来管理集群。
+- **RKE 集群**:要访问 [RKE 集群](../pages-for-subheaders/launch-kubernetes-with-rancher.md),集群必须启用了[授权集群端点(authorized cluster endpoint,ACE)](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#4-授权集群端点),而且你必须从 Rancher UI 下载了集群的 kubeconfig 文件。RKE 集群默认启用授权集群端点。通过使用此端点,你可以直接使用 kubectl 访问你的集群,而不用通过 Rancher Server 的[认证代理](../reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md#1-认证代理)进行通信。有关配置 kubectl 以使用授权集群端点的说明,请参阅[使用 kubectl 和 kubeconfig 文件直接访问集群](../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证)。这些集群将使用删除 Rancher 时配置的身份验证快照。
+
+### 如果我不想再使用 Rancher 了该怎么做?
+
+:::note
+
+之前推荐的 [System Tools](../reference-guides/system-tools.md) 自 2022 年 6 月起已弃用。
+
+:::
+
+如果你[在 Kubernetes 集群上安装了 Rancher](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md),你可以使用 [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) 工具删除 Rancher。
+
+在高可用 (HA) 模式下卸载 Rancher 还将删除所有 `helm-operation-*` Pod 和以下应用程序:
+
+- fleet
+- fleet-agent
+- rancher-operator
+- rancher-webhook
+
+自定义资源 (CRD) 和自定义命名空间仍需要手动删除。
+
+如果你在 Docker 中安装 Rancher,则可以通过删除运行 Rancher 的单个 Docker 容器来卸载 Rancher。
+
+移除 Rancher 不会影响导入的集群。有关其他集群类型,请参考[移除 Rancher 后访问下游集群](#如果删除了-rancher-server该如何访问下游集群)。
+
+### 如果我不想 Rancher 管理我的注册集群该怎么办?
+
+如果你在 Rancher UI 中删除了已注册的集群,则该集群将与 Rancher 分离,集群不会发生改变,你可以使用注册集群之前的方法访问该集群。
+
+要分离集群:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+2. 转到要与 Rancher 分离的已注册集群,然后单击 **⋮ > 删除**。
+3. 单击**删除**。
+
+**结果**:注册的集群已与 Rancher 分离,并在 Rancher 外正常运行。
+
+### 如果我不想 Rancher 管理我的 RKE 集群或托管的 Kubernetes 集群该怎么办?
+
+目前,我们没有将这些集群从 Rancher 中分离出来的功能。在这种情况下,“分离”指的是将 Rancher 组件移除出集群,并独立于 Rancher 管理对集群的访问。
+
+[此 issue](https://github.com/rancher/rancher/issues/25234) 跟踪了在没有 Rancher 的情况下管理这些集群的功能。
+
+有关如何在删除 Rancher Server 后访问集群的更多信息,请参阅[本节](#如果删除了-rancher-server该如何访问下游集群)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/security.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/security.md
new file mode 100644
index 00000000000..0078c58eac7
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/security.md
@@ -0,0 +1,14 @@
+---
+title: 安全
+
+---
+
+**是否有强化指南?**
+
+强化指南现在位于[安全](../pages-for-subheaders/rancher-security.md)部分。
+
+
+
+**Rancher Kubernetes 集群 CIS Benchmark 测试的结果是什么?**
+
+我们已经针对强化的 Rancher Kubernetes 集群运行了 CIS Kubernetes Benchmark 测试。你可以在[安全](../pages-for-subheaders/rancher-security.md)中找到该评估的结果。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/technical-items.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/technical-items.md
new file mode 100644
index 00000000000..2bc3cfb6bfc
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/technical-items.md
@@ -0,0 +1,176 @@
+---
+title: 技术
+---
+
+### 如何重置管理员密码?
+
+Docker 安装:
+```
+$ docker exec -ti <container_id> reset-password
+New password for default administrator (user-xxxxx):
+
+```
+
+Kubernetes 安装(Helm):
+```
+$ KUBECONFIG=./kube_config_cluster.yml
+$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password
+New password for default administrator (user-xxxxx):
+
+```
+
+
+
+### 我删除/停用了最后一个 admin,该如何解决?
+Docker 安装:
+```
+$ docker exec -ti <container_id> ensure-default-admin
+New default administrator (user-xxxxx)
+New password for default administrator (user-xxxxx):
+
+```
+
+Kubernetes 安装(Helm):
+```
+$ KUBECONFIG=./kube_config_cluster.yml
+$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin
+New password for default administrator (user-xxxxx):
+
+```
+### 如何启用调试日志记录?
+
+请参阅[故障排除:日志记录](../troubleshooting/other-troubleshooting-tips/logging.md)。
+
+### 我的 ClusterIP 不响应 ping,该如何解决?
+
+ClusterIP 是一个虚拟 IP,不会响应 ping。要测试 ClusterIP 是否配置正确,最好的方法是使用 `curl` 访问 IP 和端口并检查它是否响应。
+
+### 在哪里管理节点模板?
+
+打开你的账号菜单(右上角)并选择`节点模板`。
+
+### 为什么我的四层负载均衡器处于 `Pending` 状态?
+
+四层负载均衡器创建为 `type: LoadBalancer`。Kubernetes 需要一个可以满足这些请求的云提供商或控制器,否则这些请求将永远处于 `Pending` 状态。有关更多信息,请参阅[云提供商](../pages-for-subheaders/set-up-cloud-providers.md)或[创建外部负载均衡器](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)。
+
+### Rancher 的状态存储在哪里?
+
+- Docker 安装:在 `rancher/rancher` 容器的嵌入式 etcd 中,位于 `/var/lib/rancher`。
+- Kubernetes 安装:在为运行 Rancher 而创建的 RKE 集群的 etcd 中。
+
+### 支持的 Docker 版本是如何确定的?
+
+我们遵循上游 Kubernetes 版本验证过的 Docker 版本。如果需要获取验证过的版本,请查看 Kubernetes 版本 CHANGELOG.md 中的 [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.10.md#external-dependencies)。
+
+### 如何访问 Rancher 创建的节点?
+
+你可以转到**节点**视图,然后下载用于访问 Rancher 创建的节点的 SSH 密钥。选择要访问的节点并单击行尾 **⋮** 按钮,然后选择**下载密钥**,如下图所示。
+
+
+
+解压缩下载的 zip 文件,并使用 `id_rsa` 文件连接到你的主机。请务必使用正确的用户名(如果是 RancherOS,则使用 `rancher` 或 `docker`;如果是 Ubuntu,则使用 `ubuntu`;如果是 Amazon Linux,则使用 `ec2-user`)。
+
+```
+$ ssh -i id_rsa user@ip_of_node
+```
+
+### 如何在 Rancher 中自动化任务 X?
+
+UI 由静态文件组成,并根据 API 的响应工作。换言之,UI 中可以执行的每个操作/任务都可以通过 API 进行自动化。有两种方法可以实现这一点:
+
+* 访问 `https://your_rancher_ip/v3` 并浏览 API 选项。
+* 在使用 UI 时捕获 API 调用(通常使用 [Chrome 开发者工具](https://developers.google.com/web/tools/chrome-devtools/#network),但你也可以使用其他工具)。
+
+### 节点的 IP 地址改变了,该如何恢复?
+
+节点需要配置静态 IP(或使用 DHCP 保留的 IP)。如果节点的 IP 已更改,你必须在集群中删除并重新添加它。删除后,Rancher 会将集群更新为正确的状态。如果集群不再处于 `Provisioning` 状态,则已从集群删除该节点。
+
+节点的 IP 地址发生变化时,Rancher 会失去与节点的连接,因此无法正常清理节点。请参阅[清理集群节点](../how-to-guides/new-user-guides/manage-clusters/clean-cluster-nodes.md)来清理节点。
+
+在集群中移除并清理节点时,你可以将节点重新添加到集群中。
+
+### 如何将其他参数/绑定/环境变量添加到 Rancher 启动的 Kubernetes 集群的 Kubernetes 组件中?
+
+你可以使用集群选项中的[配置文件](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-集群配置文件参考)选项来添加其他参数/绑定/环境变量。有关详细信息,请参阅 RKE 文档中的[其他参数、绑定和环境变量](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/),或浏览 [Cluster.ymls 示例](https://rancher.com/docs/rke/latest/en/example-yamls/)。
+
+### 如何检查证书链是否有效?
+
+使用 `openssl verify` 命令来验证你的证书链:
+
+:::tip
+
+将 `SSL_CERT_DIR` 和 `SSL_CERT_FILE` 配置到虚拟位置,从而确保在手动验证时不使用操作系统安装的证书。
+
+:::
+
+```
+SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem
+rancher.yourdomain.com.pem: OK
+```
+
+如果你看到 `unable to get local issuer certificate` 错误,则表示链不完整。通常情况下,这表示你的服务器证书由中间 CA 颁发。如果你已经拥有此证书,你可以在证书的验证中使用它,如下所示:
+
+```
+SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem
+rancher.yourdomain.com.pem: OK
+```
+
+如果你已成功验证证书链,你需要在服务器证书中包含所需的中间 CA 证书,从而完成与 Rancher 连接的证书链(例如,使用 Rancher Agent)。服务器证书文件中证书的顺序首先是服务器证书本身(`rancher.yourdomain.com.pem` 的内容),然后是中间 CA 证书(`intermediate.pem` 的内容):
+
+```
+-----BEGIN CERTIFICATE-----
+%YOUR_CERTIFICATE%
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+%YOUR_INTERMEDIATE_CERTIFICATE%
+-----END CERTIFICATE-----
+```
+
+如果在验证过程中仍然出现错误,你可以运行以下命令,检索服务器证书的主题和颁发者:
+
+```
+openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem
+subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com
+issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA
+```
+
+### 如何在服务器证书中检查 `Common Name` 和 `Subject Alternative Names`?
+
+虽然技术上仅需要 `Subject Alternative Names` 中有一个条目,但在 `Common Name` 和 `Subject Alternative Names` 中都包含主机名可以最大程度地提高与旧版浏览器/应用程序的兼容性。
+
+检查 `Common Name`:
+
+```
+openssl x509 -noout -subject -in cert.pem
+subject= /CN=rancher.my.org
+```
+
+检查 `Subject Alternative Names`:
+
+```
+openssl x509 -noout -in cert.pem -text | grep DNS
+ DNS:rancher.my.org
+```
+
+### 为什么节点发生故障时重新调度一个 pod 需要 5 分钟以上的时间?
+
+这是以下默认 Kubernetes 设置的组合导致的:
+
+* kubelet
+ * `node-status-update-frequency`:指定 kubelet 将节点状态发布到 master 的频率(默认 10s)。
+* kube-controller-manager
+ * `node-monitor-period`:在 NodeController 中同步 NodeStatus 的周期(默认 5s)。
+ * `node-monitor-grace-period`:在将节点标记为不健康之前,允许节点无响应的时间长度(默认 40s)。
+ * `pod-eviction-timeout`:在故障节点上删除 pod 的宽限期(默认 5m0s)。
+
+有关这些设置的更多信息,请参阅 [Kubernetes:kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) 和 [Kubernetes:kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/)。
+
+Kubernetes 1.13 默认启用 `TaintBasedEvictions` 功能。有关详细信息,请参阅 [Kubernetes:基于污点的驱逐](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions)。
+
+* kube-apiserver(Kubernetes 1.13 及更高版本)
+ * `default-not-ready-toleration-seconds`:表示 `notReady:NoExecute` 的容忍度的 `tolerationSeconds`,该设置默认添加到还没有该容忍度的 pod。
+ * `default-unreachable-toleration-seconds`:表示 `unreachable:NoExecute` 的容忍度的 `tolerationSeconds`,该设置默认添加到还没有该容忍度的 pod。
+
+### 我可以在 UI 中使用键盘快捷键吗?
+
+是的,你可以使用键盘快捷键访问 UI 的大部分内容。要查看快捷方式的概览,请在 UI 任意位置按 `?`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/telemetry.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/telemetry.md
new file mode 100644
index 00000000000..400f6e839ad
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/faq/telemetry.md
@@ -0,0 +1,31 @@
+---
+title: 遥测
+---
+
+### 什么是遥测?
+
+遥测(Telemetry)收集 Rancher 安装大小、使用的组件版本以及使用功能的汇总信息。Rancher Labs 会使用此信息来改进产品,我们不会与第三方共享此信息。
+
+### 收集什么信息?
+
+我们不会收集任何识别信息(如用户名、密码或用户资源的名称或地址)。
+
+收集的主要内容包括:
+
+- 每个集群的节点总数(最小、平均、最大、总数)及其大小(例如 CPU 核心数和 RAM)。
+- 集群、项目、命名空间和 Pod 等逻辑资源的聚合计数。
+- 用于部署集群和节点的驱动程序计数(例如 GKE、EC2、导入与自定义)。
+- 部署在节点上的 Kubernetes 组件、操作系统和 Docker 的版本。
+- 是否启用了某些可选组件(例如,使用了哪些身份验证提供程序)。
+- 运行的 Rancher 的镜像名称和版本。
+- 此安装的唯一随机标识符。
+
+### 我可以看到发送的信息吗?
+
+如果启用了遥测,你可以转到 `https://<Rancher 服务器地址>/v1-telemetry` 查看当前数据。
+
+如果未启用遥测,则收集数据的进程未运行,因此没有可供查看的内容。
+
+### 如何打开或关闭它?
+
+完成初始设置后,管理员可以转到 UI `全局`中的`设置`页面,单击**编辑**,然后将 `telemetry-opt` 更改为 `in` 或 `out`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
new file mode 100644
index 00000000000..8e5c0eca63f
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md
@@ -0,0 +1,99 @@
+---
+title: 在离线环境中升级
+---
+
+:::note
+
+以下说明假设你已经按照[本页](upgrades.md)的 Kubernetes 升级说明操作(包括先决条件)到步骤 3:升级 Rancher。
+
+:::
+
+### Rancher Helm 模板选项
+
+使用安装 Rancher 时选择的选项来渲染 Rancher 模板。参考下表来替换每个占位符。Rancher 需要配置为使用私有镜像仓库,以便配置所有 Rancher 启动的 Kubernetes 集群或 Rancher 工具。
+
+根据你在安装过程中做出的选择,完成以下步骤之一。
+
+| 占位符 | 描述 |
+------------|-------------
+| `<VERSION>` | 输出压缩包的版本号。 |
+| `<RANCHER.YOURDOMAIN.COM>` | 指向负载均衡器的 DNS 名称。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 你的私有镜像仓库的 DNS 名称。 |
+| `<CERTMANAGER_VERSION>` | 在 K8s 集群上运行的 cert-manager 版本。 |
+
+
+### 选项 A:使用默认的自签名证书
+
+```
+helm template rancher ./rancher-<VERSION>.tgz --output-dir . \
+ --no-hooks \ # prevent files for Helm hooks from being generated
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set certmanager.version=<CERTMANAGER_VERSION> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # Set a default private registry to be used in Rancher
+ --set useBundledSystemChart=true # Use the packaged Rancher system charts
+```
+
+#### 解决 UPGRADE FAILED 错误
+
+如果你遇到错误消息 `Error: UPGRADE FAILED: "rancher" has no deployed releases`,Rancher 可能是通过 `helm template` 命令安装的。要成功升级 Rancher,请改用以下命令:
+
+```
+helm template rancher ./rancher-<VERSION>.tgz --output-dir . \
+ --no-hooks \ # prevent files for Helm hooks from being generated
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set certmanager.version=<CERTMANAGER_VERSION> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # Set a default private registry to be used in Rancher
+ --set useBundledSystemChart=true # Use the packaged Rancher system charts
+```
+
+执行 Helm 命令后,需要应用渲染后的模板:
+
+```
+kubectl -n cattle-system apply -R -f ./rancher
+```
+### 选项 B:使用 Kubernetes 密文从文件中获取证书
+
+```plain
+helm template rancher ./rancher-<VERSION>.tgz --output-dir . \
+ --no-hooks \ # prevent files for Helm hooks from being generated
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set ingress.tls.source=secret \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # Set a default private registry to be used in Rancher
+ --set useBundledSystemChart=true # Use the packaged Rancher system charts
+```
+
+如果你使用的是私有 CA 签名的证书,请在 `--set ingress.tls.source=secret` 后加上 `--set privateCA=true`:
+
+```plain
+helm template rancher ./rancher-<VERSION>.tgz --output-dir . \
+ --no-hooks \ # prevent files for Helm hooks from being generated
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set ingress.tls.source=secret \
+ --set privateCA=true \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # Set a default private registry to be used in Rancher
+ --set useBundledSystemChart=true # Use the packaged Rancher system charts
+```
+
+## 验证升级
+
+登录 Rancher 以确认升级成功。
+
+:::tip
+
+升级后出现网络问题?
+
+请参见[恢复集群网络](/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md)。
+
+:::
+
+## 已知升级问题
+
+你可以在 [GitHub](https://github.com/rancher/rancher/releases) 发布说明以及 [Rancher 论坛](https://forums.rancher.com/c/announcements/12)中找到每个 Rancher 版本的已知问题。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
similarity index 80%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
index b189e6d7a35..437bc35815e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md
@@ -20,12 +20,12 @@ Rancher 可以安装在任何 Kubernetes 集群上。这个集群可以使用上
你可参考以下教程,以获得设置 Kubernetes 集群的帮助:
-- **RKE**:[安装 RKE Kubernetes 集群的教程](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md);[为高可用 RKE 集群设置基础设施的教程](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md)。
-- **K3s**:[安装 K3s Kubernetes 集群的教程](../how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md);[设置高可用 K3s 集群的基础设施的教程](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md)。
-- **RKE2:** :[安装 RKE2 Kubernetes 集群的教程](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md);[设置高可用 RKE2 集群的基础设施的教程](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md)。
-- **Amazon EKS**:[在 Amazon EKS 上安装 Rancher 以及如何安装 Ingress Controller 以访问 Rancher Server](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md)。
-- **AKS**:[使用 Azure Kubernetes 服务安装 Rancher 以及如何安装 Ingress Controller 以访问 Rancher Server](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)。
-- **GKE**:有关如何使用 GKE 安装 Rancher,包括如何安装 Ingress Controller 以便可以访问 Rancher Server,请参阅[此页面](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md)。GKE 创建 Kubernetes 集群时有两种运行模式,分别是 Autopilot 和 Standard 模式。Autopilot 模式的集群配置对编辑 kube-system 命名空间有限制。但是,Rancher 在安装时需要在 kube-system 命名空间中创建资源。因此,你将无法在以 Autopilot 模式创建的 GKE 集群上安装 Rancher。
+- **RKE**:[安装 RKE Kubernetes 集群的教程](../../../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md);[为高可用 RKE 集群设置基础设施的教程](../../../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md)。
+- **K3s**:[安装 K3s Kubernetes 集群的教程](../../../how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md);[设置高可用 K3s 集群的基础设施的教程](../../../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md)。
+- **RKE2:** :[安装 RKE2 Kubernetes 集群的教程](../../../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md);[设置高可用 RKE2 集群的基础设施的教程](../../../how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md)。
+- **Amazon EKS**:[在 Amazon EKS 上安装 Rancher 以及如何安装 Ingress Controller 以访问 Rancher Server](rancher-on-amazon-eks.md)。
+- **AKS**:[使用 Azure Kubernetes 服务安装 Rancher 以及如何安装 Ingress Controller 以访问 Rancher Server](rancher-on-aks.md)。
+- **GKE**:有关如何使用 GKE 安装 Rancher,包括如何安装 Ingress Controller 以便可以访问 Rancher Server,请参阅[此页面](rancher-on-gke.md)。GKE 创建 Kubernetes 集群时有两种运行模式,分别是 Autopilot 和 Standard 模式。Autopilot 模式的集群配置对编辑 kube-system 命名空间有限制。但是,Rancher 在安装时需要在 kube-system 命名空间中创建资源。因此,你将无法在以 Autopilot 模式创建的 GKE 集群上安装 Rancher。
### Ingress Controller
@@ -43,17 +43,17 @@ Rancher UI 和 API 通过 Ingress 公开。换言之,安装 Rancher 的 Kubern
设置 Kubernetes 集群需要以下 CLI 工具。请确保这些工具已安装并在你的 `$PATH` 中可用。
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes 命令行工具。
-- [Helm](https://docs.helm.sh/using_helm/#installing-helm) - Kubernetes 的包管理器。请参见 [Helm 版本要求](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md)选择 Helm 版本来安装 Rancher。请为你的具体平台参见 [Helm 项目提供的说明](https://helm.sh/docs/intro/install/)。
+- [Helm](https://docs.helm.sh/using_helm/#installing-helm) - Kubernetes 的包管理器。请参见 [Helm 版本要求](../resources/helm-version-requirements.md)选择 Helm 版本来安装 Rancher。请为你的具体平台参见 [Helm 项目提供的说明](https://helm.sh/docs/intro/install/)。
## 安装 Rancher Helm Chart
Rancher 是使用 Kubernetes 的 [Helm](https://helm.sh/) 包管理器安装的。Helm Chart 为 Kubernetes YAML 清单文件提供了模板语法。通过 Helm,用户可以创建可配置的 deployment,而不仅仅只能使用静态文件。
-如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md)。
+如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md)。
-如果要指定安装的 Rancher 版本,请参见[选择 Rancher 版本](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md)。
+如果要指定安装的 Rancher 版本,请参见[选择 Rancher 版本](../resources/choose-a-rancher-version.md)。
-如果要指定用于安装 Rancher 的 Helm 版本,请参见[Helm 版本要求](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md)。
+如果要指定用于安装 Rancher 的 Helm 版本,请参见[Helm 版本要求](../resources/helm-version-requirements.md)。
:::note
@@ -122,7 +122,7 @@ Rancher Management Server 默认需要 SSL/TLS 配置来保证访问的安全性
### 4. 安装 cert-manager
-> 如果你使用自己的证书文件(`ingress.tls.source=secret`)或使用[外部负载均衡器的 TLS 终止](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止),你可以跳过此步骤。
+> 如果你使用自己的证书文件(`ingress.tls.source=secret`)或使用[外部负载均衡器的 TLS 终止](../installation-references/helm-chart-options.md#外部-tls-终止),你可以跳过此步骤。
仅在使用 Rancher 生成的证书(`ingress.tls.source=rancher`)或 Let's Encrypt 颁发的证书(`ingress.tls.source=letsEncrypt`)时,才需要安装 cert-manager。
@@ -131,7 +131,7 @@ Rancher Management Server 默认需要 SSL/TLS 配置来保证访问的安全性
:::note 重要提示:
-由于 cert-manager 的最新改动,你需要升级 cert-manager 版本。如果你需要升级 Rancher 并使用低于 0.11.0 的 cert-manager 版本,请参见[升级文档](../getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md)。
+由于 cert-manager 的最新改动,你需要升级 cert-manager 版本。如果你需要升级 Rancher 并使用低于 0.11.0 的 cert-manager 版本,请参见[升级文档](../resources/upgrade-cert-manager.md)。
:::
@@ -145,7 +145,7 @@ Rancher Management Server 默认需要 SSL/TLS 配置来保证访问的安全性
```
# 如果你手动安装了CRD,而不是在 Helm 安装命令中添加了 `--set installCRDs=true` 选项,你应该在升级 Helm Chart 之前升级 CRD 资源。
-kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.crds.yaml
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/<VERSION>/cert-manager.crds.yaml
# 添加 Jetstack Helm 仓库
helm repo add jetstack https://charts.jetstack.io
@@ -156,8 +156,7 @@ helm repo update
# 安装 cert-manager Helm Chart
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
- --create-namespace \
- --version v1.11.0
+ --create-namespace
```
安装完 cert-manager 后,你可以通过检查 cert-manager 命名空间中正在运行的 Pod 来验证它是否已正确部署:
@@ -272,7 +271,7 @@ deployment "rancher" successfully rolled out
:::note
-如果你想检查证书是否正确,请查看[如何在服务器证书中检查 Common Name 和 Subject Alternative Names](../faq/technical-items.md#如何在服务器证书中检查-common-name-和-subject-alternative-names)。
+如果你想检查证书是否正确,请查看[如何在服务器证书中检查 Common Name 和 Subject Alternative Names](../../../faq/technical-items.md#如何在服务器证书中检查-common-name-和-subject-alternative-names)。
:::
@@ -305,18 +304,18 @@ helm install rancher rancher-/rancher \
--set privateCA=true
```
-**添加 TLS 密文(千万不要遗漏此步骤)**:现在 Rancher 已经完成部署,你还需要参考[添加 TLS 密文](../getting-started/installation-and-upgrade/resources/add-tls-secrets.md)发布证书文件,以便 Rancher 和 Ingress Controller 可以使用它们。
+**添加 TLS 密文(千万不要遗漏此步骤)**:现在 Rancher 已经完成部署,你还需要参考[添加 TLS 密文](../resources/add-tls-secrets.md)发布证书文件,以便 Rancher 和 Ingress Controller 可以使用它们。
Rancher Chart 有许多选项,用于为你的具体环境自定义安装。以下是一些常见的高级方案:
-- [HTTP 代理](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#http-代理)
-- [私有容器镜像仓库](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#私有仓库和离线安装)
-- [外部负载均衡器上的 TLS 终止](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)
+- [HTTP 代理](../installation-references/helm-chart-options.md#http-代理)
+- [私有容器镜像仓库](../installation-references/helm-chart-options.md#私有仓库和离线安装)
+- [外部负载均衡器上的 TLS 终止](../installation-references/helm-chart-options.md#外部-tls-终止)
-如需获取完整的选项列表,请参见 [Chart 选项](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md)。
+如需获取完整的选项列表,请参见 [Chart 选项](../installation-references/helm-chart-options.md)。
### 6. 验证 Rancher Server 是否部署成功
@@ -349,4 +348,4 @@ rancher 3 3 3 3 3m
使用浏览器打开把流量转发到你的负载均衡器的 DNS 域名。然后,你就会看到一个漂亮的登录页面了。
-如果遇到任何问题,请参见[故障排除](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)。
+如果遇到任何问题,请参见[故障排除](troubleshooting.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md
new file mode 100644
index 00000000000..4db511d97a0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md
@@ -0,0 +1,147 @@
+---
+title: 在 Azure Kubernetes Service 上安装 Rancher
+---
+
+本文介绍了如何在微软的 Azure Kubernetes Service (AKS) 上安装 Rancher。
+
+本指南使用命令行工具来配置一个带有 Ingress 的 AKS 集群。如果你更喜欢使用 Azure 门户来配置集群,请参见[官方文档](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough-portal)。
+
+如果你已有一个 AKS Kubernetes 集群,请直接跳到[安装 Ingress](#5-安装-ingress) 的步骤,然后按照[此页](install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。
+
+## 先决条件
+
+:::caution
+
+部署到 Microsoft Azure 会产生费用。
+
+:::
+
+- [Microsoft Azure 账号](https://azure.microsoft.com/en-us/free/):用于创建部署 Rancher 和 Kubernetes 的资源。
+- [Microsoft Azure 订阅](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal):如果你没有的话,请访问此链接查看如何创建 Microsoft Azure 订阅。
+- [Microsoft Azure 租户](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant):访问此链接并参考教程以创建 Microsoft Azure 租户。
+- 你的订阅有足够的配额,至少有 2 个 vCPU。有关 Rancher Server 资源要求的详情,请参见[此节](../installation-requirements/installation-requirements.md)。
+- 在 Azure 中用 Helm 安装 Rancher 时,请使用 L7 负载均衡器来避免网络问题。详情请参见 [Azure 负载均衡器限制](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations)。
+
+## 1. 准备你的工作站
+
+在工作站上安装以下命令行工具:
+
+- **az**,Azure CLI:如需获得帮助,请参见[安装步骤](https://docs.microsoft.com/en-us/cli/azure/)。
+- **kubectl**:如需获得帮助,请参见[安装步骤](https://kubernetes.io/docs/tasks/tools/#kubectl)。
+- **helm**:如需获取帮助,请参见[安装步骤](https://helm.sh/docs/intro/install/)。
+
+## 2. 创建资源组
+
+安装 CLI 后,你需要用你的 Azure 账户登录:
+
+```
+az login
+```
+
+创建一个 [资源组](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) 来保存集群的所有相关资源。使用一个适用于你实际情况的位置:
+
+```
+az group create --name rancher-rg --location eastus
+```
+
+## 3. 创建 AKS 集群
+
+运行以下命令创建一个 AKS 集群。选择适用于你实际情况的虚拟机大小。如需获得可用的大小和选项,请参见[此处](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes)。在选择 Kubernetes 版本时,请务必先查阅[支持矩阵](https://rancher.com/support-matrix/),以找出已针对你的 Rancher 版本验证的最新 Kubernetes 版本。
+
+:::note
+
+如果你要从旧的 Kubernetes 版本更新到 Kubernetes v1.22 或更高版本,你还需要[更新](https://kubernetes.github.io/ingress-nginx/user-guide/k8s-122-migration/) ingress-nginx。
+
+:::
+
+```
+az aks create \
+ --resource-group rancher-rg \
+ --name rancher-server \
+ --kubernetes-version \
+ --node-count 3 \
+ --node-vm-size Standard_D2_v3
+```
+
+集群部署需要一些时间才能完成。
+
+## 4. 获取访问凭证
+
+集群部署完成后,获取访问凭证。
+
+```
+az aks get-credentials --resource-group rancher-rg --name rancher-server
+```
+
+此命令把集群的凭证合并到现有的 kubeconfig 中,并允许 `kubectl` 与集群交互。
+
+## 5. 安装 Ingress
+
+集群需要一个 Ingress,以从集群外部访问 Rancher。要使用 Ingress,你需要分配一个公共 IP 地址。请确保你有足够的配额,否则它将无法分配 IP 地址。公共 IP 地址的限制在每个订阅的区域级别生效。
+
+为确保你选择了正确的 Ingress-NGINX Helm Chart,首先在 [Kubernetes/ingress-nginx 支持表](https://github.com/kubernetes/ingress-nginx#supported-versions-table)中找到与你的 Kubernetes 版本兼容的 `Ingress-NGINX 版本`。
+
+然后,运行以下命令列出可用的 Helm Chart:
+
+```
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm search repo ingress-nginx -l
+```
+
+`helm search` 命令的输出包含一个 `APP VERSION` 列。此列下的版本等同于你之前选择的 `Ingress-NGINX 版本`。使用应用程序版本,选择一个 Chart 版本,该版本打包了与你的 Kubernetes 兼容的应用程序。例如,如果使用的是 Kubernetes v1.24,则可以选择 v4.6.0 Helm Chart,因为 Ingress-NGINX v1.7.0 与该 Chart 打包在一起,而 v1.7.0 与 Kubernetes v1.24 兼容。如有疑问,请选择最新的兼容版本。
+
+了解你需要的 Helm chart `版本`后,运行以下命令。它安装一个带有 Kubernetes 负载均衡器服务的 `nginx-ingress-controller`:
+
+```
+helm search repo ingress-nginx -l
+helm upgrade --install \
+ ingress-nginx ingress-nginx/ingress-nginx \
+ --namespace ingress-nginx \
+ --set controller.service.type=LoadBalancer \
+ --set controller.service.annotations."service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path"=/healthz \
+ --set controller.service.externalTrafficPolicy=Local \
+ --version 4.6.0 \
+ --create-namespace
+```
+
+## 6. 获取负载均衡器的 IP
+
+运行以下命令获取负载均衡器的 IP 地址:
+
+```
+kubectl get service ingress-nginx-controller --namespace=ingress-nginx
+```
+
+返回的结果应与以下内容类似:
+
+```
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
+ AGE
+ingress-nginx-controller LoadBalancer 10.0.116.18 40.31.180.83 80:31229/TCP,443:31050/TCP
+ 67s
+```
+
+保存 `EXTERNAL-IP`。
+
+## 7. 设置 DNS
+
+到 Rancher Server 的外部流量需要重定向到你创建的负载均衡器。
+
+创建指向你保存的 `EXTERNAL-IP` 的 DNS。这个 DNS 会用作 Rancher Server 的 URL。
+
+设置 DNS 的有效方法有很多。如需获取帮助,请参见 [Azure DNS 文档中心](https://docs.microsoft.com/en-us/azure/dns/)。
+
+## 8. 安装 Rancher Helm Chart
+
+按照[本页](install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
+
+安装 Rancher 时,使用上一步获取的 DNS 名称作为 Rancher Server 的 URL。它可以作为 Helm 选项传递进来。例如,如果 DNS 名称是 `rancher.my.org`,你需要使用 `--set hostname=rancher.my.org` 选项来运行 Helm 安装命令。
+
+在此设置之上安装 Rancher 时,你还需要将以下值传递到 Rancher Helm 安装命令,以设置与 Rancher 的 Ingress 资源一起使用的 Ingress Controller 的名称:
+
+```
+--set ingress.ingressClassName=nginx
+```
+
+请参阅[Helm 安装命令](install-upgrade-on-a-kubernetes-cluster.md#5-根据你选择的证书选项通过-helm-安装-rancher)了解你的证书选项。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md
new file mode 100644
index 00000000000..efc95fe7e13
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md
@@ -0,0 +1,151 @@
+---
+title: 在 Amazon EKS 上安装 Rancher
+---
+
+本文介绍了如何在 Amazon EKS 集群上安装 Rancher。你也可以[通过 AWS Marketplace 安装 Rancher](../../quick-start-guides/deploy-rancher-manager/aws-marketplace.md)。
+
+如果你已经有一个 EKS Kubernetes 集群,请直接跳转到[安装 Ingress](#5-安装-ingress)这个步骤。然后按照[此处](install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的步骤安装 Rancher Helm Chart。
+
+## 为 Rancher Server 创建 EKS 集群
+
+在本节中,你将使用命令行工具安装一个带有 Ingress 的 EKS 集群。如果你想在 EKS 上使用 Rancher 时使用较少的资源,请使用此方法。
+
+:::note 先决条件:
+
+- 已有一个 AWS 账户。
+- 建议使用 IAM 用户而不是 AWS 根账户。你将需要 IAM 用户的访问密钥 (access key) 和密文密钥 (secret key) 来配置 AWS 命令行界面。
+- IAM 用户需要具备[eksctl 文档](https://eksctl.io/usage/minimum-iam-policies/)中描述的最低 IAM 策略。
+
+:::
+
+### 1. 准备你的工作站
+
+在工作站上安装以下命令行工具:
+
+- **AWS CLI v2**:如需获取帮助,请参见[安装步骤](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)。
+- **eksctl**:如需获取帮助,请参见[安装步骤](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html)。
+- **kubectl**:如需获得帮助,请参见[安装步骤](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)。
+- **helm**:如需获取帮助,请参见[安装步骤](https://helm.sh/docs/intro/install/)。
+
+### 2. 配置 AWS CLI
+
+运行以下命令,配置 AWS CLI:
+
+```
+aws configure
+```
+
+输入以下参数:
+
+| 值 | 描述 |
+|-------|-------------|
+| AWS Access Key ID | 具有 EKS 权限的 IAM 用户的访问密钥凭证。 |
+| AWS Secret Access Key | 具有 EKS 权限的 IAM 用户的密文密钥凭证。 |
+| Default region name | 集群节点所在的 [AWS 区域](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions)。 |
+| Default output format | 输入 `json`。 |
+
+### 3. 创建 EKS 集群
+
+运行以下命令创建一个 EKS 集群。使用适用于你的用例的 AWS 区域。在选择 Kubernetes 版本时,请务必先查阅[支持矩阵](https://rancher.com/support-matrix/),以找出已针对你的 Rancher 版本验证的最新 Kubernetes 版本。
+
+**注意**:如果你要从旧的 Kubernetes 版本更新到 Kubernetes v1.22 或更高版本,你还需要[更新](https://kubernetes.github.io/ingress-nginx/user-guide/k8s-122-migration/) ingress-nginx。
+
+```
+eksctl create cluster \
+ --name rancher-server \
+ --version \
+ --region us-west-2 \
+ --nodegroup-name ranchernodes \
+ --nodes 3 \
+ --nodes-min 1 \
+ --nodes-max 4 \
+ --managed
+```
+
+使用 CloudFormation 进行的集群部署可能需要一些时间才能完成。
+
+### 4. 测试集群
+
+运行以下命令测试集群:
+
+```
+eksctl get cluster
+```
+
+返回的结果应与以下内容类似:
+
+```
+eksctl get cluster
+2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0
+2021-03-18 15:09:35 [ℹ] using region us-west-2
+NAME REGION EKSCTL CREATED
+rancher-server-cluster us-west-2 True
+```
+
+### 5. 安装 Ingress
+
+集群需要一个 Ingress,以从集群外部访问 Rancher。
+
+为确保你选择了正确的 Ingress-NGINX Helm Chart,首先在 [Kubernetes/ingress-nginx 支持表](https://github.com/kubernetes/ingress-nginx#supported-versions-table)中找到与你的 Kubernetes 版本兼容的 `Ingress-NGINX 版本`。
+
+然后,运行以下命令列出可用的 Helm Chart:
+
+```
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm search repo ingress-nginx -l
+```
+
+`helm search` 命令的输出包含一个 `APP VERSION` 列。此列下的版本等同于你之前选择的 `Ingress-NGINX 版本`。使用应用程序版本,选择一个 Chart 版本,该版本打包了与你的 Kubernetes 兼容的应用程序。例如,如果使用的是 Kubernetes v1.23,则可以选择 v4.6.0 Helm Chart,因为 Ingress-NGINX v1.7.0 与该 Chart 打包在一起,而 v1.7.0 与 Kubernetes v1.23 兼容。如有疑问,请选择最新的兼容版本。
+
+了解你需要的 Helm chart `版本`后,运行以下命令。它安装一个带有 Kubernetes 负载均衡器服务的 `nginx-ingress-controller`:
+
+```
+helm upgrade --install \
+ ingress-nginx ingress-nginx/ingress-nginx \
+ --namespace ingress-nginx \
+ --set controller.service.type=LoadBalancer \
+ --version 4.6.0 \
+ --create-namespace
+```
+
+### 6. 获取负载均衡器的 IP
+
+运行以下命令获取负载均衡器的 IP 地址:
+
+```
+kubectl get service ingress-nginx-controller --namespace=ingress-nginx
+```
+
+返回的结果应与以下内容类似:
+
+```
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
+ AGE
+ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP
+ 27m
+```
+
+保存 `EXTERNAL-IP`。
+
+### 7. 设置 DNS
+
+到 Rancher Server 的外部流量需要重定向到你创建的负载均衡器。
+
+创建指向你保存的外部 IP 地址的 DNS。这个 DNS 会用作 Rancher Server 的 URL。
+
+设置 DNS 的有效方法有很多。如需获得帮助,请参见 AWS 文档中心的[转发流量到 ELB 负载均衡器](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html)。
+
+### 8. 安装 Rancher Helm Chart
+
+按照[本页](install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
+
+安装 Rancher 时,使用上一步获取的 DNS 名称作为 Rancher Server 的 URL。它可以作为 Helm 选项传递进来。例如,如果 DNS 名称是 `rancher.my.org`,你需要使用 `--set hostname=rancher.my.org` 选项来运行 Helm 安装命令。
+
+在此设置之上安装 Rancher 时,你还需要将以下值传递到 Rancher Helm 安装命令,以设置与 Rancher 的 Ingress 资源一起使用的 Ingress Controller 的名称:
+
+```
+--set ingress.ingressClassName=nginx
+```
+
+请参阅[Helm 安装命令](install-upgrade-on-a-kubernetes-cluster.md#5-根据你选择的证书选项通过-helm-安装-rancher)了解你的证书选项。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
new file mode 100644
index 00000000000..04c2d4ff181
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md
@@ -0,0 +1,201 @@
+---
+title: 在 GKE 集群上安装 Rancher
+---
+
+在本节中,你将学习如何使用 GKE 安装 Rancher。
+
+如果你已经有一个 GKE Kubernetes 集群,请直接跳转到[安装 Ingress](#7-安装-ingress)这个步骤。然后按照[此处](install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的步骤安装 Rancher Helm Chart。
+
+## 先决条件
+
+- 你需要有一个 Google 账号。
+- 你需要有一个 Google Cloud Billing 账号。你可使用 Google Cloud Console 来管理你的 Cloud Billing 账号。有关 Cloud Console 的详情,请参见 [Console 通用指南](https://support.google.com/cloud/answer/3465889?hl=en&ref_topic=3340599)。
+- 你需要至少一个在用的 IP 地址和至少 2 个 CPU 的云配额。有关 Rancher Server 的硬件要求,请参见[本节](../installation-requirements/installation-requirements.md)。
+
+## 1. 启用 Kubernetes Engine API
+
+按照以下步骤启用 Kubernetes Engine API:
+
+1. 访问 Google Cloud Console 中的 [Kubernetes Engine 页面](https://console.cloud.google.com/projectselector/kubernetes?_ga=2.169595943.767329331.1617810440-856599067.1617343886)。
+1. 创建或选择一个项目。
+1. 打开项目,并为项目启用 Kubernetes Engine API。等待 API 和相关服务的启用。这可能需要几分钟时间。
+1. 确保为你的云项目启用了计费。有关如何为你的项目启用计费,请参见 [Google Cloud 文档中心](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project)。
+
+## 2. 打开 Cloud Shell
+
+Cloud Shell 是一个 shell 环境,用于管理托管在 Google Cloud 上的资源。Cloud Shell 预装了 `gcloud` 命令行工具和 `kubectl` 命令行工具。`gcloud` 工具为 Google Cloud 提供主要的命令行界面,而 `kubectl` 则提供针对 Kubernetes 集群的主要命令行界面。
+
+下文描述了如何从 Google Cloud Console 或从本地工作站启动 Cloud Shell。
+
+### Cloud Shell
+
+如需从 [Google Cloud Console](https://console.cloud.google.com) 启动 shell,请在控制台的右上角点击终端按钮。鼠标悬停在按钮上时,它会标记为 **Activate Cloud Shell**。
+
+### 本地 Shell
+
+执行以下步骤以安装 `gcloud` 和 `kubectl`:
+
+1. 按照[这些步骤](https://cloud.google.com/sdk/docs/install)安装 Cloud SDK。Cloud SDK 包括 `gcloud` 命令行工具。不同操作系统对应的步骤有所不同。
+1. 安装 Cloud SDK 后,运行以下命令以安装 `kubectl` 命令行工具:
+
+ ```
+ gcloud components install kubectl
+ ```
+   后面的步骤会配置 `kubectl`,使其用于新的 GKE 集群。
+1. 如果 Helm 3 未安装的话,[安装 Helm 3](https://helm.sh/docs/intro/install/)。
+1. 使用 `HELM_EXPERIMENTAL_OCI` 变量来启用 Helm 的实验功能 [OCI 镜像支持](https://github.com/helm/community/blob/master/hips/hip-0006.md)。把以下行添加到 `~/.bashrc` (或 macOS 中的 `~/.bash_profile`,或者你的 shell 存储环境变量的地方):
+
+ ```
+ export HELM_EXPERIMENTAL_OCI=1
+ ```
+1. 运行以下命令来加载你更新的 `.bashrc` 文件:
+
+ ```
+ source ~/.bashrc
+ ```
+ 如果你运行的是 macOS,使用这个命令:
+ ```
+ source ~/.bash_profile
+ ```
+
+
+
+## 3. 配置 gcloud CLI
+
+选择以下方法之一配置默认的 gcloud 设置:
+
+- 如果你想了解默认值,请使用 gcloud init。
+- 如需单独设置你的项目 ID、地区和区域,使用 gcloud config。
+
+
+
+
+1. 运行 gcloud init 并按照指示操作:
+
+ ```
+ gcloud init
+ ```
+ 如果你在远程服务器上使用 SSH,使用 --console-only 标志,以防止该命令启动浏览器。
+
+ ```
+ gcloud init --console-only
+ ```
+2. 按照指示,以授权 gcloud 使用你的 Google Cloud 账户,并选择你创建的新项目。
+
+
+
+
+
+
+
+## 4. 确认 gcloud 的配置是否正确
+
+运行:
+
+```
+gcloud config list
+```
+
+返回的结果应与以下内容类似:
+
+```
+[compute]
+region = us-west1 # Your chosen region
+zone = us-west1-b # Your chosen zone
+[core]
+account =
+disable_usage_reporting = True
+project =
+
+Your active configuration is: [default]
+```
+
+## 5. 创建一个 GKE 集群
+
+下面的命令创建了一个三节点的集群。
+
+把 `cluster-name` 替换为你新集群的名称。
+
+在选择 Kubernetes 版本时,请务必先查阅[支持矩阵](https://rancher.com/support-matrix/),以找出已针对你的 Rancher 版本验证的最新 Kubernetes 版本。
+
+要使用 Rancher 成功创建 GKE 集群,GKE 必须处于 Standard 模式。GKE 在创建 Kubernetes 集群时有两种运行模式,分别是 Autopilot 和 Standard 模式。Autopilot 模式的集群配置对编辑 kube-system 命名空间有限制。但是,Rancher 在安装时需要在 kube-system 命名空间中创建资源。因此,你将无法在以 Autopilot 模式创建的 GKE 集群上安装 Rancher。如需详细了解 GKE Autopilot 模式和 Standard 模式之间的差异,请访问[比较 GKE Autopilot 和 Standard ](https://cloud.google.com/kubernetes-engine/docs/resources/autopilot-standard-feature-comparison)。
+
+**注意**:如果你要从旧的 Kubernetes 版本更新到 Kubernetes v1.22 或更高版本,你还需要[更新](https://kubernetes.github.io/ingress-nginx/user-guide/k8s-122-migration/) ingress-nginx。
+
+```
+gcloud container clusters create cluster-name --num-nodes=3 --cluster-version=
+```
+
+## 6. 获取验证凭证
+
+创建集群后,你需要获得认证凭证才能与集群交互:
+
+```
+gcloud container clusters get-credentials cluster-name
+```
+
+此命令将 `kubectl` 配置成使用你创建的集群。
+
+## 7. 安装 Ingress
+
+集群需要一个 Ingress,以从集群外部访问 Rancher。
+
+以下命令安装带有 LoadBalancer 服务的 `nginx-ingress-controller`:
+
+```
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm upgrade --install \
+ ingress-nginx ingress-nginx/ingress-nginx \
+ --namespace ingress-nginx \
+ --set controller.service.type=LoadBalancer \
+ --version 4.0.18 \
+ --create-namespace
+```
+
+## 8. 获取负载均衡器的 IP
+
+运行以下命令获取负载均衡器的 IP 地址:
+
+```
+kubectl get service ingress-nginx-controller --namespace=ingress-nginx
+```
+
+返回的结果应与以下内容类似:
+
+```
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:31876/TCP,443:32497/TCP 81s
+```
+
+保存 `EXTERNAL-IP`。
+
+## 9. 设置 DNS
+
+到 Rancher Server 的外部流量需要重定向到你创建的负载均衡器。
+
+创建指向你保存的外部 IP 地址的 DNS。这个 DNS 会用作 Rancher Server 的 URL。
+
+设置 DNS 的有效方法有很多。如需获取帮助,请参见 Google Cloud 文档中的[管理 DNS 记录](https://cloud.google.com/dns/docs/records)部分。
+
+## 10. 安装 Rancher Helm Chart
+
+按照[本页](install-upgrade-on-a-kubernetes-cluster.md#安装-rancher-helm-chart)的说明安装 Rancher Helm Chart。任何 Kubernetes 发行版上安装的 Rancher 的 Helm 说明都是一样的。
+
+安装 Rancher 时,使用上一步获取的 DNS 名称作为 Rancher Server 的 URL。它可以作为 Helm 选项传递进来。例如,如果 DNS 名称是 `rancher.my.org`,你需要使用 `--set hostname=rancher.my.org` 选项来运行 Helm 安装命令。
+
+在此设置之上安装 Rancher 时,你还需要设置与 Rancher 的 Ingress 资源一起使用的 Ingress Controller 的名称:
+
+```
+--set ingress.ingressClassName=nginx
+```
+
+请参阅[Helm 安装命令](install-upgrade-on-a-kubernetes-cluster.md#5-根据你选择的证书选项通过-helm-安装-rancher)了解你的证书选项。
+
+在 Rancher v2.7.5 中,如果你打算在集群上使用默认的 GKE Ingress 而不启用 VPC 原生的集群模式,则需要设置以下标志:
+
+```
+--set service.type=NodePort
+```
+
+此设置是必要的,这考虑了与 ClusterIP(`cattle-system/rancher` 的默认类型)之间的兼容性问题。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-psact.yaml b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-psact.yaml
new file mode 100644
index 00000000000..ae248cc9b7c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-psact.yaml
@@ -0,0 +1,37 @@
+apiVersion: apiserver.config.k8s.io/v1
+kind: AdmissionConfiguration
+plugins:
+- configuration:
+ apiVersion: pod-security.admission.config.k8s.io/v1
+ defaults:
+ audit: restricted
+ audit-version: latest
+ enforce: restricted
+ enforce-version: latest
+ warn: restricted
+ warn-version: latest
+ exemptions:
+ namespaces:
+ - ingress-nginx
+ - kube-system
+ - cattle-system
+ - cattle-epinio-system
+ - cattle-fleet-system
+ - longhorn-system
+ - cattle-neuvector-system
+ - cattle-monitoring-system
+ - rancher-alerting-drivers
+ - cis-operator-system
+ - cattle-csp-adapter-system
+ - cattle-externalip-system
+ - cattle-gatekeeper-system
+ - istio-system
+ - cattle-istio-system
+ - cattle-logging-system
+ - cattle-windows-gmsa-system
+ - cattle-sriov-system
+ - cattle-ui-plugin-system
+ - tigera-operator
+ kind: PodSecurityConfiguration
+ name: PodSecurity
+ path: ""
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
new file mode 100644
index 00000000000..d5c661c421f
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md
@@ -0,0 +1,154 @@
+---
+title: 回滚
+---
+
+本页概述了如何在升级 Rancher 后将 Rancher 回滚到之前的版本。
+
+请在以下情况时按照本页的说明进行操作:
+- 正在运行的 Rancher 实例在备份完成后升级到了更新的版本。
+- 上游(本地)集群与进行备份的集群相同。
+
+:::tip
+
+* [请参阅这些步骤来迁移 Rancher](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md).
+* 如果你需要还原 Rancher 到同一版本的之前的状态, 请参阅[还原 Rancher](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md).
+
+:::
+
+## 特殊场景下的操作
+
+在以下场景中需要执行替代步骤来进行回滚:
+- 从 v2.6.4 及更高版本回滚到 v2.6.x 的早期版本。
+- 从 v2.7.7 及更高版本回滚到 v2.7.x 的早期版本。
+
+Rancher v2.6.4 将 cluster-api 模块从 v0.4.4 升级到 v1.0.2。反过来,cluster-api 的 v1.0.2 版本将集群 API 的自定义资源定义 (CRD) 从 `cluster.x-k8s.io/v1alpha4` 升级到 `cluster.x-k8s.io/v1beta1`。当你尝试将 Rancher v2.6.4 回滚到以前版本的 Rancher v2.6.x 时,CRD 升级到 v1beta1 会导致回滚失败。这是因为使用旧 apiVersion (v1alpha4) 的 CRD 与 v1beta1 不兼容。
+
+在 Rancher v2.7.7 版本中,应用 `rancher-provisioning-capi` 会自动安装在上游(本地)集群上,来替代嵌入的 cluster-api 控制器。 如果上游集群同时包含该应用和 Rancher v2.7.6 及更早版本,则会发生冲突和意外错误。 因此,如果你尝试从 Rancher v2.7.7 回滚到 Rancher v2.7.x 的任何早期版本,需要执行替代的步骤。
+
+### 步骤 1: 清理上游(本地)集群
+
+要避免回滚失败,你需要在尝试恢复操作或回滚**之前**根据该[说明](https://github.com/rancher/rancher-cleanup/blob/main/README.md)运行以下 Rancher 脚本:
+
+
+* `cleanup.sh`:清理集群。
+* `verify.sh`:检查集群中是否有任何与 Rancher 相关的资源。
+
+
+:::caution
+
+`cleanup.sh` 运行的时候会有停机时间,这是因为脚本会删除 Rancher 创建的资源。
+
+:::
+
+**结果:** 上游(本地)集群中所有 Rancher 关联的资源将被清理。
+
+请参阅 [rancher/rancher-cleanup 仓库](https://github.com/rancher/rancher-cleanup) 以获取更多细节和源码。
+
+### 步骤 2: 还原备份并启动 Rancher
+
+此时上游集群上应该已经没有 Rancher 相关的资源了。 因此,下一步与将 Rancher 迁移到不包含 Rancher 资源的新集群相同。
+
+按照[说明](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)安装 Rancher-Backup Helm Chart 并恢复 Rancher 到之前的状态。请记住:
+1. 步骤 3 可以跳过,因为之前安装的 Cert-Manager 应用应该还在上游(本地)集群中。
+2. 执行到步骤 4 时,在要回滚到的 local 集群上安装你需要的 Rancher 版本。
+
+## 回滚到 Rancher 2.5.0+
+
+要回滚到 Rancher 2.5.0+,使用 **Rancher 备份**应用并通过备份来恢复 Rancher。
+
+回滚后,Rancher 必须以较低/较早的版本启动。
+
+还原是通过创建 Restore 自定义资源实现的。
+
+:::note 重要提示:
+
+* 请按照此页面上的说明在已备份的同一集群上还原 Rancher。要把 Rancher 迁移到新集群,请参照步骤[迁移 Rancher](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
+
+* 在使用相同设置恢复 Rancher 时,Rancher deployment 在恢复开始前被手动缩减,然后 Operator 将在恢复完成后将其重新扩容。因此,在恢复完成之前,Rancher 和 UI 都将不可用。如果 UI 不可用,你可以使用初始的集群 kubeconfig,通过 `kubectl create -f restore.yaml` 命令应用 YAML 恢复文件。
+
+:::
+
+### 步骤 1 :创建 Restore 自定义资源
+
+1. 点击 **☰ > 集群管理**。
+1. 找到你的本地集群,并点击 **Explore**。
+1. 在左侧导航栏中,点击 **Rancher 备份 > 还原**。
+ :::note
+
+ 如果 Rancher Backups 应用不可见,你需要到 **Apps** 的 Charts 页面中安装应用。详情请参见[此处](../../../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md#access-charts)。
+
+ :::
+
+1. 单击**创建**。
+1. 使用表单或 YAML 创建 Restore。如需获取使用表单创建 Restore 资源的更多信息,请参见[配置参考](../../../reference-guides/backup-restore-configuration/restore-configuration.md)和[示例](../../../reference-guides/backup-restore-configuration/examples.md)。
+1. 如需使用 YAML 编辑器,点击**创建 > 使用 YAML 文件创建**。然后输入 Restore YAML。以下是 Restore 自定义资源示例:
+
+ ```yaml
+ apiVersion: resources.cattle.io/v1
+ kind: Restore
+ metadata:
+ name: restore-migration
+ spec:
+ backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz
+ encryptionConfigSecretName: encryptionconfig
+ storageLocation:
+ s3:
+ credentialSecretName: s3-creds
+ credentialSecretNamespace: default
+ bucketName: rancher-backups
+ folder: rancher
+ region: us-west-2
+ endpoint: s3.us-west-2.amazonaws.com
+ ```
+ 如需获得配置 Restore 的帮助,请参见[配置参考](../../../reference-guides/backup-restore-configuration/restore-configuration.md)和[示例](../../../reference-guides/backup-restore-configuration/examples.md)。
+
+1. 单击**创建**。
+
+**结果**:已创建备份文件并更新到目标存储位置。资源还原顺序如下:
+
+1. 自定义资源定义(CRD)
+2. 集群范围资源
+3. 命名空间资源
+
+如需查看还原的处理方式,请检查 Operator 的日志。按照如下步骤获取日志:
+
+```yaml
+kubectl get pods -n cattle-resources-system
+kubectl logs -n cattle-resources-system -f
+```
+
+### 步骤 2:回滚到上一个 Rancher 版本
+
+你可以使用 Helm CLI 回滚 Rancher。要回滚到上一个版本:
+
+```yaml
+helm rollback rancher -n cattle-system
+```
+
+如果你不是想回滚到上一个版本,你也可以指定回滚的版本。查看部署历史记录:
+
+```yaml
+helm history rancher -n cattle-system
+```
+
+确定目标版本后,执行回滚。此示例回滚到版本 `3`:
+
+```yaml
+helm rollback rancher 3 -n cattle-system
+```
+
+## 回滚到 Rancher 2.2-2.4
+
+要回滚到 2.5 之前的 Rancher 版本,参考此处的步骤[恢复备份 — Kubernetes 安装](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md)。如果恢复 Rancher Server 的集群的某个快照,Rancher 的版本以及状态均会恢复回到快照时的版本和状态。
+
+有关回滚 Docker 安装的 Rancher,请参见[本页](../other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md)。
+
+:::note
+
+托管集群对其状态具有权威性。因此,恢复 Rancher Server 不会恢复快照后对托管集群进行的工作负载部署或更改。
+
+:::
+
+## 回滚到 Rancher 2.0-2.1
+
+我们不再支持回滚到 Rancher 2.0-2.1。回滚到这些版本的说明保留在[此处](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md),仅用于无法升级到 v2.2 的情况。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
new file mode 100644
index 00000000000..2912d831f55
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md
@@ -0,0 +1,188 @@
+---
+title: Rancher Server Kubernetes 集群的问题排查
+---
+
+本文介绍如何对安装在 Kubernetes 集群上的 Rancher 进行故障排除。
+
+### 相关命名空间
+
+故障排除主要针对以下 3 个命名空间中的对象:
+
+- `cattle-system`:`rancher` deployment 和 Pod。
+- `ingress-nginx`:Ingress Controller Pod 和 services。
+- `cert-manager`:`cert-manager` Pod。
+
+### "default backend - 404"
+
+很多操作都有可能导致 Ingress Controller 无法将流量转发到你的 Rancher 实例。但是大多数情况下都是由错误的 SSL 配置导致的。
+
+检查事项:
+
+- [Rancher 是否正在运行](#检查-rancher-是否正在运行)
+- [证书的 Common Name(CN)是 "Kubernetes Ingress Controller Fake Certificate"](#证书的-cn-是-kubernetes-ingress-controller-fake-certificate)
+
+### 检查 Rancher 是否正在运行
+
+使用 `kubectl` 检查 `cattle-system` 系统命名空间,并查看 Rancher Pod 的状态是否是 **Running**:
+
+```
+kubectl -n cattle-system get pods
+
+NAME READY STATUS RESTARTS AGE
+pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m
+```
+
+如果状态不是 `Running`,在 Pod 上运行 `describe`,并检查 **Events**:
+
+```
+kubectl -n cattle-system describe pod
+
+...
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Scheduled 11m default-scheduler Successfully assigned rancher-784d94f59b-vgqzh to localhost
+ Normal SuccessfulMountVolume 11m kubelet, localhost MountVolume.SetUp succeeded for volume "rancher-token-dj4mt"
+ Normal Pulling 11m kubelet, localhost pulling image "rancher/rancher:v2.0.4"
+ Normal Pulled 11m kubelet, localhost Successfully pulled image "rancher/rancher:v2.0.4"
+ Normal Created 11m kubelet, localhost Created container
+ Normal Started 11m kubelet, localhost Started container
+```
+
+### 检查 Rancher 日志
+
+使用 `kubectl` 列出 Pod:
+
+```
+kubectl -n cattle-system get pods
+
+NAME READY STATUS RESTARTS AGE
+pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m
+```
+
+使用 `kubectl` 和 Pod 名称列出该 Pod 的日志:
+
+```
+kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh
+```
+
+### 证书的 CN 是 "Kubernetes Ingress Controller Fake Certificate"
+
+使用浏览器检查证书的详细信息。如果显示 CN 是 "Kubernetes Ingress Controller Fake Certificate",则说明读取或颁发 SSL 证书时出现了问题。
+
+:::note
+
+如果你使用的是 Let's Encrypt 证书,证书颁发的过程可能需要几分钟。
+
+:::
+
+### 排查 Cert-Manager 颁发的证书(Rancher 或 Let's Encrypt 生成的)问题
+
+`cert-manager` 有 3 部分:
+
+- `cert-manager` 命名空间中的 `cert-manager` Pod。
+- `cattle-system` 命名空间中的 `Issuer` 对象。
+- `cattle-system` 命名空间中的 `Certificate` 对象。
+
+往后操作,对每个对象运行 `kubectl describe` 并检查事件。这样,你可以追踪可能丢失的内容。
+
+以下是 Issuer 有问题的示例:
+
+```
+kubectl -n cattle-system describe certificate
+...
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning IssuerNotReady 18s (x23 over 19m) cert-manager Issuer rancher not ready
+```
+
+```
+kubectl -n cattle-system describe issuer
+...
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning ErrInitIssuer 19m (x12 over 19m) cert-manager Error initializing issuer: secret "tls-rancher" not found
+ Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found
+```
+
+### 排查你自己提供的 SSL 证书问题
+
+你的证书直接应用于 `cattle-system` 命名空间中的 Ingress 对象。
+
+检查 Ingress 对象的状态,并查看它是否准备就绪:
+
+```
+kubectl -n cattle-system describe ingress
+```
+
+如果 Ingress 对象已就绪,但是 SSL 仍然无法正常工作,你的证书或密文的格式可能不正确。
+
+这种情况下,请检查 nginx-ingress-controller 的日志。nginx-ingress-controller 的 Pod 中有多个容器,因此你需要指定容器的名称:
+
+```
+kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-controller
+...
+W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found
+```
+
+### 没有匹配的 "Issuer"
+
+你所选的 SSL 配置要求在安装 Rancher 之前先安装 Cert-Manager,否则会出现以下错误:
+
+```
+Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1"
+```
+
+在这种情况下,先安装 Cert-Manager,然后再重新安装 Rancher。
+
+
+### Canal Pod 显示 READY 2/3
+
+此问题的最常见原因是端口 8472/UDP 在节点之间未打开。因此,你可以检查你的本地防火墙、网络路由或安全组。
+
+解决网络问题后,`canal` Pod 会超时并重启以建立连接。
+
+### nginx-ingress-controller Pod 显示 RESTARTS
+
+此问题的最常见原因是 `canal` pod 未能建立覆盖网络。参见 [canal Pod 显示 READY `2/3`](#canal-pod-显示-ready-23) 进行排查。
+
+
+### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed)
+
+此错误的原因可能是:
+
+* 指定连接的用户无权访问 Docker Socket。如果是这个原因,你通过登录主机并运行 `docker ps` 命令来检查:
+
+```
+$ ssh user@server
+user@server$ docker ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+```
+
+如果需要了解如何进行正确设置,请参见[以非 root 用户身份管理 Docker](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user)。
+
+* 你使用的操作系统是 RedHat 或 CentOS:由于 [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565),你不能使用 `root` 用户连接到节点。因此,你需要添加一个单独的用户并配置其访问 Docker Socket。如果需要了解如何进行正确设置,请参见[以非 root 用户身份管理 Docker](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user)。
+
+* SSH 服务器版本不是 6.7 或更高版本:高版本是 Socket 转发所必需的,用于通过 SSH 连接到 Docker Socket。你可以在你要连接的主机上使用 `sshd -V` 或使用 netcat 进行检查:
+```
+$ nc xxx.xxx.xxx.xxx 22
+SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10
+```
+
+### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found
+
+`ssh_key_path` 密钥文件无法访问:请确保你已经指定了私钥文件(不是公钥 `.pub`),而且运行 `rke` 命令的用户可以访问该私钥文件。
+
+### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain
+
+`ssh_key_path` 密钥文件不是访问节点的正确文件:请仔细检查,确保你已为节点指定了正确的 `ssh_key_path` 和连接用户。
+
+### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys
+
+如需使用加密的私钥,请使用 `ssh-agent` 来使用密码来加载密钥。如果在运行 `rke` 命令的环境中找到 `SSH_AUTH_SOCK` 环境变量,它将自动用于连接到节点。
+
+### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
+
+节点无法通过配置的 `address` 和 `port` 访问。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrade-a-hardened-cluster-to-k8s-v1-25.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrade-a-hardened-cluster-to-k8s-v1-25.md
new file mode 100644
index 00000000000..f55a95063b5
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrade-a-hardened-cluster-to-k8s-v1-25.md
@@ -0,0 +1,68 @@
+---
+title: 将加固的自定义/导入集群升级到 Kubernetes v1.25
+---
+
+Kubernetes v1.25 改变了集群描述和执行安全策略的方式。从这个版本开始,[Pod 安全策略 (PSP)](https://kubernetes.io/docs/concepts/security/pod-security-policy/)不再可用。Kubernetes v1.25 将它们替换为新的安全对象:[Pod 安全标准 (PSS)](https://kubernetes.io/docs/concepts/security/pod-security-standards/) 和 [Pod 安全准入 (PSA)](https://kubernetes.io/docs/concepts/security/pod-security-admission/)。
+
+如果你具有自定义或导入的加固集群,你需要做好准备,确保将旧版本的 Kubernetes 顺利升级到 v1.25 或更高版本。
+
+:::note
+
+升级到 v1.25 后,添加必要的 Rancher 命名空间豁免。有关详细信息,请参阅 [Pod 安全准入 (PSA) 配置模板](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md#豁免必须的-rancher-命名空间)。
+
+:::
+
+## 将导入的加固集群升级到 Kubernetes v1.25 或更高版本
+
+
+
+
+在集群中的每个节点上执行以下操作:
+1. 将 [`rancher-psact.yaml`](./rancher-psact.yaml) 保存到 `/etc/rancher/rke2` 中。
+1. 编辑 RKE2 配置文件:
+ 1. 将 `profile` 字段更新为 `cis-1.23`。
+ 1. 指定刚才添加的配置文件的路径:`pod-security-admission-config-file: /etc/rancher/rke2/rancher-psact.yaml`。
+
+
+
+
+在集群中的每个节点上执行以下操作:
+
+遵循 K3s [将加固集群从 v1.24.x 升级到 v1.25.x](https://docs.k3s.io/known-issues#hardened-125)的官方说明,但使用[自定义](./rancher-psact.yaml)Rancher PSA 配置模板,而不是 K3s 官方网站上提供的配置。
+
+
+
+执行这些步骤后,你可以通过 Rancher UI 升级集群的 Kubernetes 版本:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**表中找到要更新的集群,点击 **⋮**。
+1. 选择**编辑配置**。
+1. 在 **Kubernetes 版本**下拉菜单中,选择要使用的版本。
+1. 单击**保存**。
+
+## 将自定义加固集群升级到 Kubernetes v1.25 或更高版本
+
+
+
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**表中找到要更新的集群,点击 **⋮**。
+1. 选择**编辑配置**。
+1. 在**基本信息 > 安全**下的 **CIS 配置文件**下拉菜单中,选择 `cis-1.23`。
+1. 在 **PSA 配置模板**下拉菜单中,选择 `rancher-restricted`。
+1. 在 **Kubernetes 版本**下拉菜单中,选择要使用的版本。
+1. 单击**保存**。
+
+
+
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**表中找到要更新的集群,点击 **⋮**。
+1. 选择**编辑 YAML**。
+1. 从 `kube-apiserver-arg.enable-admission-plugins` 中删除 `PodSecurityPolicy`。
+1. 在 `spec` 字段中,添加一行:`defaultPodSecurityAdmissionConfigurationTemplateName: rancher-restricted`
+1. 将 `kubernetesVersion` 更新为你选择的版本(v1.25 或更高版本)。
+1. 单击**保存**。
+
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md
new file mode 100644
index 00000000000..b60d3ab748b
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md
@@ -0,0 +1,209 @@
+---
+title: 升级
+---
+
+本文介绍如何升级使用 Helm 安装在 Kubernetes 集群上的 Rancher Server。这些步骤也适用于使用 Helm 进行的离线安装。
+
+有关使用 Docker 安装的 Rancher 的升级说明,请参见[本页。](../other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md)
+
+如需升级 Kubernetes 集群中的组件,或更改 [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) 或[附加组件(add-on)](https://rancher.com/docs/rke/latest/en/config-options/add-ons/)的定义,请参见 Rancher Kubernetes Engine(RKE)的 [RKE 升级文档](https://rancher.com/docs/rke/latest/en/upgrades/)。
+
+
+## 先决条件
+
+### 访问 kubeconfig
+
+Helm 的运行位置,应该与你的 kubeconfig 文件,或你运行 kubectl 命令的位置相同。
+
+如果你在安装 Kubernetes 时使用了 RKE,那么 config 将会在你运行 `rke up` 的目录下创建。
+
+kubeconfig 也可以通过 `--kubeconfig` 标志手动指定(详情请参见 https://helm.sh/docs/helm/helm/),以选择所需的集群。
+
+### 查看已知问题
+
+如需查看每个 Rancher 版本的已知问题,请参见 [GitHub](https://github.com/rancher/rancher/releases) 中的发行说明,或查看 [Rancher 论坛](https://forums.rancher.com/c/announcements/12)。
+
+不支持 _升级_ 或 _升级到_ [rancher-alpha 仓库](../resources/choose-a-rancher-version.md#helm-chart-仓库)中的任何 Chart。
+### Helm 版本
+
+本安装指南假定你使用的是 Helm 3。
+
+
+
+如果你使用 Helm 2,请参见 [Helm 2 迁移到 Helm 3 文档](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/)。如果你不能升级到 Helm 3,[Helm 2 升级页面](/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md)提供了使用 Helm 2 升级的旧升级指南。
+
+### 离线安装:推送镜像到私有镜像仓库
+
+[仅适用于离线安装](../other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md):为新的 Rancher Server 版本收集和推送镜像。使用你需要针对 Rancher 版本升级的镜像,按照步骤[推送镜像到私有镜像仓库](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md)。
+
+### 使用 cert-manager 0.8.0 之前的版本升级
+
+[从 2019 年 11 月 1 日开始,Let's Encrypt 已屏蔽早于 0.8.0 的 cert-manager 实例](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753)。因此,请参见[说明](../resources/upgrade-cert-manager.md)把 cert-manager 升级到最新版本。
+
+## 升级概要
+
+按照以下步骤升级 Rancher Server:
+
+
+### 1. 备份运行 Rancher Server 的 Kubernetes 集群
+
+使用[备份应用](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md)来备份 Rancher。
+
+如果升级过程中出现问题,你将使用备份作为还原点。
+
+### 2. 更新 Helm Chart 仓库
+
+1. 更新本地 Helm 仓库缓存。
+
+ ```
+ helm repo update
+ ```
+
+1. 获取你用来安装 Rancher 的仓库名称。
+
+ 关于仓库及其区别,请参见 [Helm Chart 仓库](../resources/choose-a-rancher-version.md#helm-chart-仓库)。
+
+ - Latest:建议用于试用最新功能
+ ```
+ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+ ```
+ - Stable:建议用于生产环境
+ ```
+ helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+ ```
+ - Alpha:即将发布的实验性预览。
+ ```
+ helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha
+ ```
+ 注意:不支持升级到 Alpha 版、从 Alpha 版升级或在 Alpha 版之间升级。
+
+ ```
+ helm repo list
+
+ NAME URL
+ stable https://charts.helm.sh/stable
+ rancher-<CHART_REPO> https://releases.rancher.com/server-charts/<CHART_REPO>
+ ```
+
+ :::note
+
+ 如果你想切换到不同的 Helm Chart 仓库,请按照[切换仓库步骤](../resources/choose-a-rancher-version.md#切换到不同-helm-chart-仓库)进行操作。如果你要切换仓库,请先再次列出仓库,再继续执行步骤 3,以确保添加了正确的仓库。
+
+ :::
+
+1. 从 Helm Chart 仓库获取最新的 Chart 来安装 Rancher。
+
+ 该命令将提取最新的 Chart,并将其作为 `.tgz`文件保存在当前目录中。
+
+ ```plain
+ helm fetch rancher-<CHART_REPO>/rancher
+ ```
+ 你可以通过 `--version=<VERSION>` 标记,来指定要升级的目标 Chart 版本。例如:
+
+ ```plain
+ helm fetch rancher-<CHART_REPO>/rancher --version=2.6.8
+ ```
+
+### 3. 升级 Rancher
+
+本节介绍了如何使用 Helm 升级 Rancher 的一般(互联网连接)或离线安装。
+
+:::note 离线说明:
+
+如果你在离线环境中安装 Rancher,请跳过本页的其余部分,按照[本页](air-gapped-upgrades.md)上的说明渲染 Helm 模板。
+
+:::
+
+
+从当前安装的 Rancher Helm Chart 中获取用 `--set`传递的值。
+
+```
+helm get values rancher -n cattle-system
+
+hostname: rancher.my.org
+```
+
+:::note
+
+这个命令会列出更多的值。此处展示的只是其中一个值的例子。
+
+:::
+
+:::tip
+
+Deployment 的名称可能会有所不同。例如,如果你通过 AWS Marketplace 部署 Rancher,则 Deployment 的名称为“rancher-stable”。
+因此:
+```
+helm get values rancher-stable -n cattle-system
+
+hostname: rancher.my.org
+```
+
+:::
+
+如果要将 cert-manager 从 v1.5 或更早的版本升级到最新版本,请参阅 [cert-manager upgrade docs](../resources/upgrade-cert-manager.md#选项-c升级-15-及以下版本的-cert-manager) 了解如何在不卸载或重新安装 Rancher 的情况下升级 cert-manager。否则,请按照以下[ Rancher 升级步骤](#rancher-升级步骤)进行操作。
+
+#### Rancher 升级步骤
+
+在保留所有设置的情况下,将 Rancher 升级到最新版本。
+
+将上一步中的所有值用 `--set key=value` 追加到命令中。
+
+对于 Kubernetes v1.25 或更高版本,使用 Rancher v2.7.2-v2.7.4 时,将 `global.cattle.psp.enabled` 设置为 `false`。对于 Rancher v2.7.5 及更高版本来说,这不是必需的,但你仍然可以手动设置该选项。
+
+```
+helm upgrade rancher rancher-<CHART_REPO>/rancher \
+ --namespace cattle-system \
+ --set hostname=rancher.my.org
+```
+
+:::note
+
+以上是一个例子,可能有更多上一步的值需要追加。
+
+:::
+
+:::tip
+
+如果你通过 AWS Marketplace 部署 Rancher,则 Deployment 的名称为“rancher-stable”。
+因此:
+```
+helm upgrade rancher-stable rancher-<CHART_REPO>/rancher \
+ --namespace cattle-system \
+ --set hostname=rancher.my.org
+```
+
+:::
+
+另外,你也可以将当前的值导出到一个文件中,并在升级时引用该文件。例如,如果你只需要改变 Rancher 的版本:
+
+1. 将当前值导出到文件:
+ ```
+ helm get values rancher -n cattle-system -o yaml > values.yaml
+ ```
+1. 只更新 Rancher 版本:
+
+ 对于 Kubernetes v1.25 或更高版本,使用 Rancher v2.7.2-v2.7.4 时,将 `global.cattle.psp.enabled` 设置为 `false`。对于 Rancher v2.7.5 及更高版本来说,这不是必需的,但你仍然可以手动设置该选项。
+
+ ```
+ helm upgrade rancher rancher-<CHART_REPO>/rancher \
+ --namespace cattle-system \
+ -f values.yaml \
+ --version=2.6.8
+ ```
+
+### 4. 验证升级
+
+登录 Rancher 以确认升级成功。
+
+:::tip
+
+升级后出现网络问题?
+
+请参见[恢复集群网络](/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md)。
+
+:::
+
+## 已知升级问题
+
+你可以在 [GitHub](https://github.com/rancher/rancher/releases) 发布说明以及 [Rancher 论坛](https://forums.rancher.com/c/announcements/12)中找到每个 Rancher 版本的已知问题。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-and-upgrade.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-and-upgrade.md
similarity index 56%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-and-upgrade.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-and-upgrade.md
index d57549034f6..aeef9f9c6e7 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/installation-and-upgrade.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-and-upgrade.md
@@ -14,7 +14,7 @@ description: 了解如何在开发和生产环境中安装 Rancher。了解单
- **K3s(轻量级 Kubernetes)**:也是经过认证的 Kubernetes 发行版。它比 RKE 更新,更易用且更轻量,其所有组件都在一个小于 100 MB 的二进制文件中。
- **RKE2**:一个完全合规的 Kubernetes 发行版,专注于安全和合规性。
-`restrictedAdmin` Helm Chart 选项在 **Rancher Server** 可用。如果该选项设置为 true,初始的 Rancher 用户访问本地 Kubernetes 集群会受到限制,以避免权限升级。详情请参见 [restricted-admin 角色](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#受限管理员)。
+`restrictedAdmin` Helm Chart 选项在 **Rancher Server** 可用。如果该选项设置为 true,初始的 Rancher 用户访问本地 Kubernetes 集群会受到限制,以避免权限升级。详情请参见 [restricted-admin 角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#受限管理员)。
## 安装方式概述
@@ -26,7 +26,7 @@ Rancher 可以安装在以下主要架构上:
### 通过 AWS Marketplace 在 EKS 上安装 Rancher
-你可以[通过 AWS Marketplace](../getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md) 将 Rancher 安装到 Amazon Elastic Kubernetes Service (EKS) 上。部署的 EKS 集群已生产就绪,并遵循 AWS 最佳实践。
+你可以[通过 AWS Marketplace](../quick-start-guides/deploy-rancher-manager/aws-marketplace.md) 将 Rancher 安装到 Amazon Elastic Kubernetes Service (EKS) 上。部署的 EKS 集群已生产就绪,并遵循 AWS 最佳实践。
### 单节点 Kubernetes 安装
@@ -38,7 +38,7 @@ Rancher 可以安装在单节点 Kubernetes 集群上。但是,在单节点安
如果你的目的是测试或演示,你可以使用 Docker 把 Rancher 安装到单个节点中。本地 Kubernetes 集群是安装到单个 Docker 容器中的,而 Rancher 是安装到本地集群中的。
-Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
+Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
### 其他方式
@@ -46,9 +46,9 @@ Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用
| 网络访问方式 | 基于 Kubernetes 安装(推荐) | 基于 Docker 安装 |
| ---------------------------------- | ------------------------------ | ---------- |
-| 可直接访问互联网 | [文档](install-upgrade-on-a-kubernetes-cluster.md) | [文档](rancher-on-a-single-node-with-docker.md) |
-| 使用 HTTP 代理 | [文档](rancher-behind-an-http-proxy.md) | [文档](rancher-on-a-single-node-with-docker.md)及[配置](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) |
-| 离线环境 | [文档](air-gapped-helm-cli-install.md) | [文档](air-gapped-helm-cli-install.md) |
+| 可直接访问互联网 | [文档](install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md) | [文档](other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md) |
+| 使用 HTTP 代理 | [文档](other-installation-methods/rancher-behind-an-http-proxy/rancher-behind-an-http-proxy.md) | [文档](other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)及[配置](../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) |
+| 离线环境 | [文档](other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md) | [文档](other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md) |
我们建议在 Kubernetes 集群上安装 Rancher,因为在多节点集群中,Rancher Server 可以实现高可用。高可用配置可以提升 Rancher 访问其管理的下游 Kubernetes 集群的稳定性。
@@ -56,29 +56,29 @@ Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用
如果你的目的是测试或演示,你可以将 Rancher 安装到单个 Docker 容器中。Docker 安装可以让你实现开箱即用,以使用 Rancher 设置 Kubernetes 集群。Docker 安装主要是用于探索 Rancher Server 的功能,只适用于开发和测试。
-[在 Kubernetes 上安装 Rancher 的说明](install-upgrade-on-a-kubernetes-cluster.md)介绍了如何首先使用 K3s 或 RKE 创建和管理 Kubernetes 集群,然后再将 Rancher 安装到该集群上。
+[在 Kubernetes 上安装 Rancher 的说明](install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)介绍了如何首先使用 K3s 或 RKE 创建和管理 Kubernetes 集群,然后再将 Rancher 安装到该集群上。
-如果 Kubernetes 集群中的节点正在运行且满足[节点要求](installation-requirements.md),你可以使用 Helm 将 Rancher 部署到 Kubernetes 上。Helm 使用 Rancher 的 Helm Chart 在 Kubernetes 集群的每个节点上安装 Rancher 的副本。我们建议使用负载均衡器将流量定向到集群中的每个 Rancher 副本上。
+如果 Kubernetes 集群中的节点正在运行且满足[节点要求](installation-requirements/installation-requirements.md),你可以使用 Helm 将 Rancher 部署到 Kubernetes 上。Helm 使用 Rancher 的 Helm Chart 在 Kubernetes 集群的每个节点上安装 Rancher 的副本。我们建议使用负载均衡器将流量定向到集群中的每个 Rancher 副本上。
-如需进一步了解 Rancher 架构,请参见[架构概述](rancher-manager-architecture.md),[生产级别架构推荐](../reference-guides/rancher-manager-architecture/architecture-recommendations.md)或[最佳实践指南](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md)。
+如需进一步了解 Rancher 架构,请参见[架构概述](../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md),[生产级别架构推荐](../../reference-guides/rancher-manager-architecture/architecture-recommendations.md)或[最佳实践指南](../../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md)。
## 先决条件
-安装 Rancher 之前,请确保你的节点满足所有[安装要求](installation-requirements.md)。
+安装 Rancher 之前,请确保你的节点满足所有[安装要求](installation-requirements/installation-requirements.md)。
## 架构建议
-为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 使用单独的专用 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
+为了达到最佳性能和安全性,我们建议你为 Rancher Management Server 使用单独的专用 Kubernetes 集群。不建议在此集群上运行用户工作负载。部署 Rancher 后,你可以[创建或导入集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)来运行你的工作负载。
-详情请参见[架构推荐](../reference-guides/rancher-manager-architecture/architecture-recommendations.md)。
+详情请参见[架构推荐](../../reference-guides/rancher-manager-architecture/architecture-recommendations.md)。
### 在 Kubernetes 上安装 Rancher 的更多选项
-参见 [Helm Chart 选项](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md)以了解在 Kubernetes 集群上安装 Rancher 的其他配置,包括:
+参见 [Helm Chart 选项](installation-references/helm-chart-options.md)以了解在 Kubernetes 集群上安装 Rancher 的其他配置,包括:
-- [开启 API 审计日志来记录所有事务](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#api-审计日志)
-- [负载均衡器上的 TLS 终止](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)
-- [自定义 Ingress](../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#自定义-ingress)
+- [开启 API 审计日志来记录所有事务](installation-references/helm-chart-options.md#api-审计日志)
+- [负载均衡器上的 TLS 终止](installation-references/helm-chart-options.md#外部-tls-终止)
+- [自定义 Ingress](installation-references/helm-chart-options.md#自定义-ingress)
在 Rancher 的安装指南中,我们推荐使用 K3s 或 RKE 来配置 Kubernetes 集群,然后再在这个集群中安装 Rancher。K3s 和 RKE 均提供许多配置选项,用于为你的具体环境自定义 Kubernetes 集群。有关选项和功能的完整列表,请参见:
@@ -87,8 +87,8 @@ Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用
### 在 Docker 上安装 Rancher 的更多选项
-参见 [Docker 安装选项](rancher-on-a-single-node-with-docker.md)了解其他配置,包括:
+参见 [Docker 安装选项](other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)了解其他配置,包括:
-- [开启 API 审计日志来记录所有事务](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)
-- [外部负载均衡器](../how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md)
-- [持久化数据存储](../reference-guides/single-node-rancher-in-docker/advanced-options.md#持久化数据)
+- [开启 API 审计日志来记录所有事务](../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)
+- [外部负载均衡器](../../how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md)
+- [持久化数据存储](../../reference-guides/single-node-rancher-in-docker/advanced-options.md#持久化数据)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/feature-flags.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/feature-flags.md
new file mode 100644
index 00000000000..660fc74a14a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/feature-flags.md
@@ -0,0 +1,39 @@
+---
+title: 功能开关
+---
+
+使用功能开关(Feature Flag),你可以试用可选或实验性的功能并启用正在逐步淘汰的旧版功能。
+
+要了解功能的值以及如何启用它们,请参阅[启用实验性功能](../../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
+
+:::note
+
+某些功能要求重新启动 Rancher 容器。Rancher UI 中标记了要求重启的功能。
+
+:::
+
+以下是 Rancher 中可用的功能开关列表。如果你是从旧 Rancher 版本升级的,你可能会在 Rancher UI 中看到其他功能,例如 `proxy` 或 `dashboard`(均[已中断](/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md)):
+
+- `continuous-delivery`:允许从 Fleet 中单独禁用 Fleet GitOps。有关详细信息,请参阅[持续交付](../../../how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md)。
+- `fleet`:v2.6 及更高版本的 Rancher 配置框架需要 Fleet。即使你在旧 Rancher 版本中禁用了该标志,该标志也将在升级时自动启用。有关详细信息,请参阅 [Fleet - GitOps at Scale](../../../integrations-in-rancher/fleet/fleet.md)。
+- `harvester`:管理 Virtualization Management 页面的访问。用户可以在该页面直接导航到 Harvester 集群并访问 Harvester UI。有关详细信息,请参阅 [Harvester 集成](../../../integrations-in-rancher/harvester/overview.md)。
+- `istio-virtual-service-ui`:启用[可视界面](../../../how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md)来创建、读取、更新和删除 Istio 虚拟服务和目标规则,这些都是 Istio 流量管理功能。
+- `legacy`:启用 2.5.x 及更早版本的一组功能,这些功能正逐渐被新的实现淘汰。它们是已弃用以及后续可用于新版本的功能组合。新的 Rancher 安装会默认禁用此标志。如果你从以前版本的 Rancher 升级,此标志会启用。
+- `multi-cluster-management`:允许配置和管理多个 Kubernetes 集群。此标志只能在安装时设置。后续无法启用或禁用它。
+- `rke1-custom-node-cleanup`:清除已删除的 RKE1 自定义节点。建议你启用此标志,以防止已删除的节点尝试重新加入集群。
+- `rke2`:启用配置 RKE2 集群。此标志默认启用。
+- `token-hashing`:启用令牌哈希。启用后,会使用 SHA256 算法对现有 Token 和所有新 Token 进行哈希处理。一旦对 Token 进行哈希处理,就无法撤消操作。此标志在启用后无法禁用。有关详细信息,请参阅 [API 令牌](../../../reference-guides/about-the-api/api-tokens.md#令牌哈希)。
+- `unsupported-storage-drivers`:允许启用非默认启用的存储提供程序和卷插件。有关详细信息,请参阅[允许使用不受支持的存储驱动程序](../../../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)。
+
+下表介绍了 Rancher 中功能开关的可用性和默认值。标记为“GA”的功能已普遍可用:
+
+| 功能开关名称 | 默认值 | 状态 | 可用于 |
+| ----------------------------- | ------------- | ------------ | --------------- |
+| `continuous-delivery` | `true` | GA | v2.6.0 |
+| `fleet` | `true` | 不能禁用 | v2.6.0 |
+| `fleet` | `true` | GA | v2.5.0 |
+| `harvester` | `true` | 实验功能 | v2.6.1 |
+| `legacy` | 新安装:`false`;升级:`true` | GA | v2.6.0 |
+| `rke1-custom-node-cleanup` | `true` | GA | v2.6.0 |
+| `rke2` | `true` | 实验功能 | v2.6.0 |
+| `token-hashing` | 新安装:`false`;升级:`true` | GA | v2.6.0 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md
new file mode 100644
index 00000000000..ca0cdcc3a2f
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/helm-chart-options.md
@@ -0,0 +1,310 @@
+---
+title: Rancher Helm Chart 选项
+keywords: [rancher helm chart, rancher helm 选项, rancher helm chart 选项, helm chart rancher, helm 选项 rancher, helm chart 选项 rancher]
+---
+
+本文提供了 Rancher Helm Chart 的配置参考。
+
+如需选择 Helm Chart 版本,请参见[本页](../../../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md)。
+
+了解开启实验性功能的详情,请参见[本页](../../../how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
+
+## 常用选项
+
+| 选项 | 默认值 | 描述 |
+| ------------------------- | ------------- | ---------------------------------------------------------------------------------- |
+| `bootstrapPassword` | " " | `string` - 为第一个管理员用户设置[引导密码](#引导密码)。登录后,管理员需要重置密码。如不设置,会使用随机生成的引导密码。 |
+| `hostname` | " " | `string` - 你的 Rancher Server 的完全限定的域名(FQDN) |
+| `ingress.tls.source` | "rancher" | `string` - 从哪里获取 Ingress 的证书- "rancher, letsEncrypt, secret" |
+| `letsEncrypt.email` | " " | `string` - 你的邮箱地址 |
+| `letsEncrypt.environment` | "production" | `string` - 可选项:"staging, production" |
+| `privateCA` | false | `bool` - 如果你的证书是由私有 CA 签发的,把这个值设置为 true |
+
+
+
+## 高级选项
+
+| 选项 | 默认值 | 描述 |
+| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `additionalTrustedCAs` | false | `bool` - 请参见[额外的授信 CA](#额外的授信-ca) |
+| `addLocal` | "true" | `string` - 让 Rancher 检测并导入 “local” Rancher Server 集群。_注意:此选项在 2.5.0 中已不可用。你可考虑使用 `restrictedAdmin` 选项,来避免用户修改本地集群。_ |
+| `antiAffinity` | "preferred" | `string` - Rancher Pod 的反亲和性规则 - "preferred, required" |
+| `auditLog.destination` | "sidecar" | `string` - 发送审计日志到 Sidecar 容器的控制台或 hostPath 卷 - "sidecar, hostPath" |
+| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - 主机上的日志文件目标地址(仅当`auditLog.destination` 的值是 `hostPath` 时生效) |
+| `auditLog.level` | 0 | `int` - 设置 [API 审计日志](../../../how-to-guides/advanced-user-guides/enable-api-audit-log.md)等级。0 代表关闭。[0-3] |
+| `auditLog.maxAge` | 1 | `int` - 旧审计日志文件最多可保留的天数(仅当`auditLog.destination` 的值是 `hostPath` 时生效) |
+| `auditLog.maxBackup` | 1 | `int` - 审计文件最大可保留的个数(仅当 `auditLog.destination` 的值是 `hostPath` 时生效) |
+| `auditLog.maxSize` | 100 | `int` - 在审计日志被轮换前的最大容量,单位是 MB(仅当 `auditLog.destination` 的值是 `hostPath` 时生效) |
+| `auditLog.image.repository` | "registry.suse.com/bci/bci-micro" | `string` - 用于收集审计日志的镜像的位置。 |
+| `auditLog.image.tag` | "15.4.14.3" | `string` - 用于收集审计日志的镜像的标签。 |
+| `auditLog.image.pullPolicy` | "IfNotPresent" | `string` - 覆盖 auditLog 镜像的 imagePullPolicy - “Always”、“Never”、“IfNotPresent”。 |
+| `busyboxImage` | "" | `string` - 用于收集审计日志的 busybox 镜像位置。_注意:此选项已弃用,请使用 `auditLog.image.repository` 来控制审计 sidecar 镜像_。 |
+| `certmanager.version` | "" | `string` - 设置 cert-manager compatibility |
+| `debug` | false | `bool` - 在 Rancher Server 设置 debug 参数 |
+| `extraEnv` | [] | `list` - 为 Rancher 额外设置环境变量 |
+| `imagePullSecrets` | [] | `list` - 私有镜像仓库凭证的密文名称列表 |
+| `ingress.configurationSnippet` | "" | `string` - 添加额外的 Nginx 配置。可用于代理配置。 |
+| `ingress.extraAnnotations` | {} | `map` - 用于自定义 Ingress 的额外注释 |
+| `ingress.enabled` | true | 如果值为 false,Helm 不会安装 Rancher Ingress。你可把值设为 false 以部署你自己的 Ingress。 |
+| `letsEncrypt.ingress.class` | "" | `string` - cert-manager acmesolver ingress 的可选 ingress 类,用于响应 Let's Encrypt ACME 质询。选项:traefik,nginx。 | |
+| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - 不使用代理的主机名或 IP 地址的逗号分隔列表 | |
+| `proxy` | "" | `string` - 给 Rancher 配置的 HTTP[S] 代理 |
+| `rancherImage` | "rancher/rancher" | `string` - Rancher 镜像源 |
+| `rancherImagePullPolicy` | "IfNotPresent" | `string` - 覆盖 Rancher Server 镜像的 imagePullPolicy - "Always", "Never", "IfNotPresent" |
+| `rancherImageTag` | 和 Chart 版本一致 | `string` - rancher/rancher 镜像标签 |
+| `replicas` | 3 | `int` - Rancher Server 副本数。如果设为 -1,会根据集群中的可用节点数自动选择 1,2或3。 |
+| `resources` | {} | `map` - Rancher Pod 资源请求和限制 |
+| `restrictedAdmin` | `false` | `bool` - 如果值为 true,初始的 Rancher 用户访问本地 Kubernetes 集群会受到限制,以避免权限升级。详情请参见 [restricted-admin 角色](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#受限管理员)。 |
+| `systemDefaultRegistry` | "" | `string` - 用于所有系统容器镜像的私有仓库,例如 http://registry.example.com/ |
+| `tls` | "ingress" | `string` - 详情请参见[外部 TLS 终止](#外部-tls-终止)。- "ingress, external" |
+| `useBundledSystemChart` | `false` | `bool` - 选择 Rancher Server 打包的 system-charts。此参数用于离线环境安装。 |
+| `global.cattle.psp.enabled` | `true` | `bool` - 使用 Rancher v2.7.2-v2.7.4 时,选择 `false` 以禁用 Kubernetes v1.25 及更高版本的 PSP。使用 Rancher v2.7.5 及更高版本时,Rancher 会尝试检测集群是否运行不支持 PSP 的 Kubernetes 版本,如果确定集群不支持 PSP,则将默认 PSP 的使用设置为 false。你仍然可以通过显式提供此值的 `true` 或 `false` 来手动覆盖此值。在支持 PSP 的集群中(例如使用 Kubernetes v1.24 或更低版本的集群),Rancher 仍将默认使用 PSP。 |
+
+
+### 引导密码
+
+Rancher 首次启动时,会为第一个管理员用户随机生成一个密码。当管理员首次登录 Rancher 时,用于获取引导密码(Bootstrap)的命令会在 UI 上显示。管理员需要运行命令并使用引导密码登录。然后 Rancher 会让管理员重置密码。
+
+如果你想指定引导密码而不使用随机生成的密码,请参考以下命令设置密码。
+
+```plain
+--set bootstrapPassword="rancher"
+```
+
+无论你是使用提供的密码还是生成的密码,密码均存储在 Kubernetes 密文中。安装 Rancher 后,如何使用 kubectl 获取密码的说明将会在 UI 中显示:
+
+```
+kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}'
+```
+
+### API 审计日志
+
+启用 [API 审计日志](../../../how-to-guides/advanced-user-guides/enable-api-audit-log.md)。
+
+你可以像收集其他容器日志一样收集此日志。在 Rancher Server 集群上为 `System` 项目启用 [Logging](../../../integrations-in-rancher/logging/logging.md)。
+
+```plain
+--set auditLog.level=1
+```
+
+默认情况下,启用审计日志会在 Rancher pod 中创建一个 Sidecar 容器。这个容器(`rancher-audit-log`)会把日志流传输到 `stdout`。你可以像收集其他容器日志一样收集此日志。如果你使用 Sidecar 作为审计日志的目标时, `hostPath`,`maxAge`,`maxBackups` 和 `maxSize` 选项不会生效。建议使用你的操作系统或 Docker Daemon 的日志轮换功能来控制磁盘空间的使用。请为 Rancher Server 集群或 System 项目启用 [Logging](../../../integrations-in-rancher/logging/logging.md)。
+
+将 `auditLog.destination` 的值设为 `hostPath`,可以将日志转发到与主机系统共享的卷,而不是传输到 Sidecar 容器。如果目标设置为 `hostPath`,你可能需要调整其他 auditLog 参数以进行日志轮换。
+
+### 额外设置环境变量
+
+你可以使用 `extraEnv` 为 Rancher Server 额外设置环境变量。该列表以 YAML 格式传递给 Rancher 部署,它嵌入在 Rancher 容器的 `env` 下。你可以参考 Kubernetes 文档设置容器环境变量。`extraEnv` 可以使用 [Define Environment Variables for a Container](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#define-an-environment-variable-for-a-container) 中引用的任何键。
+
+使用 `name` 和 `value` 键的示例:
+
+```plain
+--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION'
+--set 'extraEnv[0].value=1.0'
+```
+
+如果将敏感数据(例如代理认证凭证)作为环境变量的值传递,则强烈建议使用 Secret 引用。这将防止敏感数据在 Helm 或 Rancher 部署中暴露。
+
+你可以参考使用 `name`、`valueFrom.secretKeyRef.name` 和 `valueFrom.secretKeyRef.key` 键的示例。详见 [HTTP 代理](#http-代理)中的示例。
+
+### TLS 设置
+
+当你在 Kubernetes 集群内安装 Rancher 时,TLS 会在集群的 Ingress Controller 上卸载。支持的 TLS 设置取决于使用的 Ingress Controller。
+
+参见 [TLS 设置](tls-settings.md)了解更多信息和选项。
+
+### 导入 `local` 集群
+
+默认情况下,Rancher Server 会检测并导入其所在的 `local` 集群。有权访问 `local` 集群的用户对 Rancher Server 管理的所有集群具有“root”访问权限。
+
+:::caution
+
+如果你关闭 addLocal,大多数 Rancher 2.5 功能都不能使用,包括 EKS Provisioner。
+
+:::
+
+如果这在你的环境中是一个问题,你可以在初始安装时将此选项设置为“false”。
+
+此选项仅在首次安装 Rancher 时有效。详情请参见 [Issue 16522](https://github.com/rancher/rancher/issues/16522)。
+
+```plain
+--set addLocal="false"
+```
+
+### 自定义 Ingress
+
+要自定义或使用 Rancher Server 的其他 Ingress,你可以设置自己的 Ingress 注释。
+
+设置自定义证书颁发者的示例:
+
+```plain
+--set ingress.extraAnnotations.'cert-manager\.io/cluster-issuer'=issuer-name
+```
+
+以下是使用 `ingress.configurationSnippet`设置静态代理标头的示例。该值像模板一样进行解析,因此可以使用变量。
+
+```plain
+--set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};'
+```
+
+### HTTP 代理
+
+Rancher 的一些功能(Helm Chart)需要使用互联网才能使用。你可以使用 `proxy` 设置代理服务器,或使用 `extraEnv` 设置 `HTTPS_PROXY` 环境变量来指向代理服务器。
+
+将要排除的 IP 使用逗号分隔列表添加到 `noProxy` Chart value 中。确保添加了以下值:
+- Pod 集群 IP 范围(默认值:`10.42.0.0/16`)。
+- Service Cluster IP 范围(默认值:`10.43.0.0/16`)。
+- 内部集群域(默认值:`.svc,.cluster.local`)。
+- 任何 Worker 集群 `controlplane` 节点。
+ Rancher 支持在此列表中使用 CIDR 表示法来表示范围。
+
+不包括敏感数据时,可以使用 `proxy` 或 `extraEnv` Chart 选项。使用 `extraEnv` 时将忽略 `noProxy` Helm 选项。因此,`NO_PROXY` 环境变量也必须设置为 `extraEnv`。
+
+以下是使用 `proxy` Chart 选项设置代理的示例:
+
+```plain
+--set proxy="http://<proxy_url>:<proxy_port>/"
+```
+
+使用 `extraEnv` Chart 选项设置代理的示例:
+```plain
+--set extraEnv[1].name=HTTPS_PROXY
+--set extraEnv[1].value="http://<proxy_url>:<proxy_port>/"
+--set extraEnv[2].name=NO_PROXY
+--set extraEnv[2].value="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local"
+```
+
+包含敏感数据(例如代理认证凭证)时,请使用 `extraEnv` 选项和 `valueFrom.secretRef` 来防止敏感数据在 Helm 或 Rancher 部署中暴露。
+
+下面是使用 `extraEnv` 配置代理的示例。此示例 Secret 在 Secret 的 `"https-proxy-url"` 键中包含 `"http://<username>:<password>@<proxy_url>:<proxy_port>/"` 值:
+```plain
+--set extraEnv[1].name=HTTPS_PROXY
+--set extraEnv[1].valueFrom.secretKeyRef.name=secret-name
+--set extraEnv[1].valueFrom.secretKeyRef.key=https-proxy-url
+--set extraEnv[2].name=NO_PROXY
+--set extraEnv[2].value="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local"
+```
+
+有关如何配置环境变量的更多信息,请参阅[为容器定义环境变量](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#define-an-environment-variable-for-a-container)。
+
+### 额外的授信 CA
+
+如果你有私有镜像仓库(registries)、应用商店(catalogs)或拦截证书的代理,则可能需要向 Rancher 添加额外的授信 CA。
+
+```plain
+--set additionalTrustedCAs=true
+```
+
+创建完 Rancher deployment 后,将 pem 格式的 CA 证书复制到一个名为 `ca-additional.pem` 的文件中,并使用 `kubectl` 在 `cattle-system` 命名空间中创建 `tls-ca-additional` 密文。
+
+```plain
+kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem
+```
+
+### 私有仓库和离线安装
+
+有关使用私有仓库安装 Rancher 的详情,请参见[离线安装](../other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
+
+## 外部 TLS 终止
+
+我们建议将负载均衡器配置为 4 层均衡,将普通 80/tcp 和 443/tcp 转发到 Rancher Management 集群节点。集群上的 Ingress Controller 会将端口 80 上的 HTTP 流量重定向到端口 443 上的 HTTPS。
+
+你可以在 Rancher 集群(Ingress)外部的 L7 负载均衡器上终止 SSL/TLS。使用 `--set tls=external` 选项,将负载均衡器指向所有 Rancher 集群节点上的 HTTP 端口 80。这将在 HTTP 端口 80 上暴露 Rancher 接口。请注意,允许客户端直接连接到 Rancher 集群时,连接不会被加密。如果你选择这样做,我们建议你将网络级别的直接访问限制为仅你的负载均衡器。
+
+:::note
+
+如果你使用的是私有 CA 签名的证书,请添加 `--set privateCA=true` 并参见[添加 TLS 密文 - 使用私有 CA 签名证书](../../../getting-started/installation-and-upgrade/resources/add-tls-secrets.md),为 Rancher 添加 CA 证书。
+
+:::
+
+你的负载均衡器必须支持长期存在的 Websocket 连接,并且需要插入代理头,以便 Rancher 可以正确传送链接。
+
+### 使用 NGINX v0.25 为外部 TLS 配置 Ingress
+
+在 NGINX 0.25 中,NGINX 关于转发头和外部 TLS 终止的行为[已更改](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220)。因此,如果你同时使用 NGINX 0.25 和外部 TLS 终止配置,你必须编辑 `cluster.yml` 来为 Ingress 启用 `use-forwarded-headers` 选项。
+
+```yaml
+ingress:
+ provider: nginx
+ options:
+ use-forwarded-headers: 'true'
+```
+
+### 必须的 Header
+
+- `Host`
+- `X-Forwarded-Proto`
+- `X-Forwarded-Port`
+- `X-Forwarded-For`
+
+### 建议的超时时间
+
+- 读超时:`1800 seconds`
+- 写超时:`1800 seconds`
+- 连接超时:`30 seconds`
+
+### 健康检查
+
+Rancher 将对 `/healthz` 端点的健康检查响应 `200`。
+
+### 示例 NGINX 配置
+
+此 NGINX 配置已在 NGINX 1.14 上进行了测试。
+
+:::caution
+
+此 NGINX 配置只是一个示例,可能不适合你的环境。如需查阅完整文档,请参见 [NGINX 负载均衡 - HTTP 负载均衡](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/)。
+
+:::
+
+- 将 `IP_NODE_1`、`IP_NODE_2` 和 `IP_NODE_3` 替换为你集群中节点的 IP 地址。
+- 将两处的 `FQDN` 均替换为 Rancher 的 DNS 名称。
+- 把 `/certs/fullchain.pem` 和 `/certs/privkey.pem` 分别替换为服务器证书和服务器证书密钥的位置。
+
+```
+worker_processes 4;
+worker_rlimit_nofile 40000;
+
+events {
+ worker_connections 8192;
+}
+
+http {
+ upstream rancher {
+ server IP_NODE_1:80;
+ server IP_NODE_2:80;
+ server IP_NODE_3:80;
+ }
+
+ map $http_upgrade $connection_upgrade {
+ default Upgrade;
+ '' close;
+ }
+
+ server {
+ listen 443 ssl http2;
+ server_name FQDN;
+ ssl_certificate /certs/fullchain.pem;
+ ssl_certificate_key /certs/privkey.pem;
+
+ location / {
+ proxy_set_header Host $host;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Port $server_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_pass http://rancher;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ # 此项允许执行的 shell 窗口保持开启,最长可达15分钟。不使用此参数的话,默认1分钟后自动关闭。
+ proxy_read_timeout 900s;
+ proxy_buffering off;
+ }
+ }
+
+ server {
+ listen 80;
+ server_name FQDN;
+ return 301 https://$server_name$request_uri;
+ }
+}
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/installation-references.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/installation-references.md
new file mode 100644
index 00000000000..c2d9035c3cd
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/installation-references.md
@@ -0,0 +1,5 @@
+---
+title: 安装参考
+---
+
+有关其他安装资源,请参阅以下参考指南:[Rancher Helm Chart 选项](helm-chart-options.md)、[TLS 设置](tls-settings.md)和[功能开关](feature-flags.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/tls-settings.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/tls-settings.md
new file mode 100644
index 00000000000..25f5652fe6e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-references/tls-settings.md
@@ -0,0 +1,21 @@
+---
+title: TLS 设置
+---
+
+更改默认 TLS 设置的方法取决于它的安装方式。
+
+## 在高可用 Kubernetes 集群中运行 Rancher
+
+当你在 Kubernetes 集群内安装 Rancher 时,TLS 会在集群的 Ingress Controller 上卸载。可用的 TLS 设置取决于使用的 Ingress Controller:
+
+* nginx-ingress-controller(RKE1 和 RKE2 默认):[默认的 TLS 版本和密码](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#default-tls-version-and-ciphers)。
+* traefik(K3s 默认):[TLS 选项](https://doc.traefik.io/traefik/https/tls/#tls-options)。
+
+## 在单个 Docker 容器中运行 Rancher
+
+默认 TLS 配置仅支持 TLS 1.2 和安全的 TLS 密码套件。你可以通过设置以下环境变量来更改此配置:
+
+| 参数 | 描述 | 默认 | 可用选项 |
+|-----|-----|-----|-----|
+| `CATTLE_TLS_MIN_VERSION` | 最小 TLS 版本 | `1.2` | `1.0`, `1.1`, `1.2`, `1.3` |
+| `CATTLE_TLS_CIPHERS` | 支持的 TLS 密码套件 | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`, `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`, `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | 详情请参见 [Golang TLS 常量](https://golang.org/pkg/crypto/tls/#pkg-constants)。 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
new file mode 100644
index 00000000000..84f2383eaf7
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/dockershim.md
@@ -0,0 +1,47 @@
+---
+title: Dockershim
+---
+
+Dockershim 是 Kubelet 和 Docker Daemon 之间的 CRI 兼容层。Kubernetes 1.20 版本宣布了[移除树内 Dockershim](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/)。有关此移除的更多信息以及时间线,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。
+
+RKE 集群现在支持外部 Dockershim,来让用户继续使用 Docker 作为 CRI 运行时。现在,我们通过使用 [Mirantis 和 Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) 来确保 RKE 集群可以继续使用 Docker,从而实现上游开源社区的外部 Dockershim。
+
+RKE2 和 K3s 集群使用嵌入的 containerd 作为容器运行时,因此不受影响。
+
+要在 1.24 之前的 RKE 版本中启用外部 Dockershim,请配置以下选项:
+
+```
+enable_cri_dockerd: true
+```
+
+从 1.24 版本开始,以上默认为 true。
+
+如果你想使用其他容器运行时,Rancher 也提供使用 Containerd 作为默认运行时的,以边缘为中心的 K3s,和以数据中心为中心的 RKE2 Kubernetes 发行版。然后,你就可以通过 Rancher 对导入的 RKE2 和 K3s Kubernetes 集群进行升级和管理。
+
+### 常见问题
+
+
+
+Q:是否必须升级 Rancher 才能获得 Rancher 对上游外部 Dockershim 替换的支持?
+
+A:对于 RKE,Dockershim `cri_dockerd` 替换的上游支持从 Kubernetes 1.21 开始。你需要使用支持 RKE 1.21 的 Rancher 版本。详情请参见我们的支持矩阵。
+
+
+
+Q:我目前的 RKE 使用 Kubernetes 1.23。如果上游最终在 1.24 中删除 Dockershim,会发生什么?
+
+A:RKE 中带有 Kubernetes 的 Dockershim 版本将继续工作到 1.23。有关时间线的更多信息,请参见 [Kubernetes Dockershim 弃用相关的常见问题](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed)。从 1.24 开始,RKE 将默认启用 `cri_dockerd` 并在之后的版本中继续启用。
+
+
+
+Q: 如果我不想再依赖 Dockershim 或 cri_dockerd,我还有什么选择?
+
+A: 你可以为 Kubernetes 使用不需要 Dockershim 支持的运行时,如 Containerd。RKE2 和 K3s 就是其中的两个选项。
+
+
+
+Q: 如果我目前使用 RKE1,但想切换到 RKE2,我可以怎样进行迁移?
+
+A: 你可以构建一个新集群,然后将工作负载迁移到使用 Containerd 的新 RKE2 集群。Rancher 也在探索就地升级路径的可能性。
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/install-docker.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/install-docker.md
new file mode 100644
index 00000000000..27bf949bb11
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/install-docker.md
@@ -0,0 +1,23 @@
+---
+title: 安装 Docker
+---
+
+在使用 Helm 在 RKE 集群节点上或使用 Docker 安装 Rancher Server 前,你需要在节点中先安装 Docker。RKE2 和 K3s 集群不要求使用 Docker。
+
+Docker 有几个安装方法。一种方法是参见 [Docker 官方文档](https://docs.docker.com/install/)以了解如何在 Linux 上安装 Docker。不同 Linux 发行版的安装步骤可能有所不同。
+
+另一种方式是使用 Rancher 的 Docker 安装脚本,该脚本可用于较新的 Docker 版本。 Rancher 为每个 Kubernetes 支持的上游 Docker 版本提供了安装脚本。
+
+例如,此命令可用于在 SUSE Linux Enterprise 或 Ubuntu 等主要 Linux 发行版上安装 Docker :
+
+```bash
+curl https://releases.rancher.com/install-docker/20.10.sh | sh
+```
+
+请参阅 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix),使用匹配你的操作系统和 Rancher 版本并且经过验证的 Docker 版本。 尽管支持矩阵列出了经过验证的 Docker 版本直至补丁版本,但只有发行版的主要版本和次要版本与 Docker 安装脚本相关。
+
+请注意,必须应用以下 sysctl 设置:
+
+```bash
+net.bridge.bridge-nf-call-iptables=1
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
new file mode 100644
index 00000000000..e4800c2528f
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/installation-requirements.md
@@ -0,0 +1,219 @@
+---
+title: 安装要求
+description: 如果 Rancher 配置在 Docker 或 Kubernetes 中运行时,了解运行 Rancher Server 的每个节点的节点要求
+---
+
+本文描述了对需要安装 Rancher Server 的节点的软件、硬件和网络要求。Rancher Server 可以安装在单个节点或高可用的 Kubernetes 集群上。
+
+:::note 重要提示:
+
+如果你需要在 Kubernetes 集群上安装 Rancher,该节点的要求与用于运行应用和服务的[下游集群的节点要求](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md)不同。
+
+:::
+
+Rancher UI 在基于 Firefox 或 Chromium 的浏览器(Chrome、Edge、Opera、Brave)中效果最佳。
+
+查看我们的[最佳实践](../../../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md)页面,获取在生产环境中运行 Rancher Server 的建议。
+
+## Kubernetes 与 Rancher 的兼容性
+
+Rancher 需要安装在支持的 Kubernetes 版本上。请查阅 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions),确保你的 Kubernetes 版本受支持。
+
+## 在安全加固的 Kubernetes 集群上安装 Rancher
+
+如果你在安全加固的 Kubernetes 集群上安装 Rancher,请查看[豁免必须的 Rancher 命名空间](../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md#豁免必须的-rancher-命名空间)以了解详细的要求。
+
+## 操作系统和容器运行时要求
+
+所有支持的操作系统都使用 64-bit x86 架构。Rancher 兼容当前所有的主流 Linux 发行版。
+
+[Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)列出了每个 Rancher 版本测试过的操作系统和 Docker 版本。
+
+运行 RKE 集群的节点需要安装 Docker。RKE2 或 K3s 集群不需要它。
+
+请安装 `ntp`(Network Time Protocol),以防止在客户端和服务器之间由于时间不同步造成的证书验证错误。
+
+某些 Linux 发行版的默认防火墙规则可能会阻止 Kubernetes 集群内的通信。从 Kubernetes v1.19 开始,你必须关闭 firewalld,因为它与 Kubernetes 网络插件冲突。
+
+如果你不太想这样做的话,你可以查看[相关问题](https://github.com/rancher/rancher/issues/28840)中的建议。某些用户已能成功[使用 ACCEPT 策略 为 Pod CIDR 创建一个独立的 firewalld 区域](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822)。
+
+如果你需要在 ARM64 上使用 Rancher,请参见[在 ARM64(实验功能)上运行 Rancher](../../../how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md)。
+
+### RKE2 要求
+
+对于容器运行时,RKE2 附带了自己的 containerd。RKE2 安装不需要 Docker。
+
+如需了解 RKE2 通过了哪些操作系统版本的测试,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)。
+
+
+### K3s 要求
+
+对于容器运行时,K3s 默认附带了自己的 containerd。你也可以将 K3s 配置为使用已安装的 Docker 运行时。有关在 Docker 中使用 K3s 的更多信息,请参阅 [K3s 文档](https://docs.k3s.io/advanced#using-docker-as-the-container-runtime)。
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)。如需指定 K3s 版本,在运行 K3s 安装脚本时,使用 `INSTALL_K3S_VERSION` 环境变量。
+
+如果你使用 **Raspbian Buster** 在 K3s 集群上安装 Rancher,请按照[这些步骤](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster)切换到旧版 iptables。
+
+如果你在使用 Alpine Linux 的 K3s 集群上安装 Rancher,请按照[这些步骤](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup)进行其他设置。
+
+### RKE 要求
+
+RKE 需要 Docker 容器运行时。支持的 Docker 版本请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions)。
+
+有关详细信息,请参阅[安装 Docker](install-docker.md)。
+
+## 硬件要求
+
+本节描述安装 Rancher Server 的节点的 CPU、内存和磁盘要求。硬件要求根据你的 Rancher 部署规模而定。
+
+### 实际考虑
+
+Rancher 的硬件占用空间取决于许多因素,包括:
+
+ - 托管的基础设施规模 (例如: 节点数量,集群数量)。
+ - 所需访问控制规则的复杂性(例如:RoleBinding 对象计数)。
+ - 工作负载数量 (例如: Kubernetes 部署,Fleet 部署)。
+ - 使用模式 (例如:主动使用的功能集合,使用频率,并发用户数量)。
+
+由于存在许多可能随时间变化的影响因素,因此此处列出的要求为适合大多数用例的起点。 然而,你的用例可能有不同的要求。 若你需要对于特定场景的咨询,请[联系 Rancher](https://rancher.com/contact/) 以获得进一步指导。
+
+
+特别指出,本页面中的要求基于以下假设的环境提出,包括:
+ - 每种类型的 Kubernetes 资源数量小于 60,000 个。
+ - 每个节点最多 120 个 Pod。
+ - 上游(本地)集群中最多 200 个 CRD。
+ - 下游集群中最多 100 个 CRD。
+ - 最多 50 个 Fleet 部署。
+
+更多的数量也是能够达到的,但需要更高的硬件要求。 如果你有超过 20,000 个相同类型的资源,通过 Rancher UI 加载整个列表的时间可能需要几秒钟。
+
+:::note Evolution:
+
+Rancher 的代码库不断发展,用例不断变化,Rancher 积累的经验也在不断增长。
+
+随着指导方针的准确性不断的提高并且变得更加具体,硬件要求也会发生变化。
+
+如果你发现你的 Rancher 部署不再符合列出的建议,请[联系 Rancher](https://rancher.com/contact/) 进行重新评估。
+
+:::
+
+### RKE2 Kubernetes
+
+下面的表格列出了[上游集群](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)中每个节点最小的 CPU 和内存要求。
+
+请注意,生产环境下的高可用安装最少需要 3 个节点。
+
+| 部署规模 | 最大集群数量 | 最大节点数量 | vCPUs | 内存 |
+| --------------- | -------- | --------- | ----- | ---- |
+| 小 | 150 | 1500 | 4 | 16 GB |
+| 中 | 300 | 3000 | 8 | 32 GB |
+| 大 (*) | 500 | 5000 | 16 | 64 GB |
+| 更大 (†) | (†) | (†) | (†) | (†) |
+
+(*): 大规模的部署需要你[遵循最佳实践](../../../reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md)以获得足够的性能。
+
+(†): 通过特别的硬件建议和调整能够实现更大的部署规模。 你可以[联系 Rancher](https://rancher.com/contact/) 进行定制评估。
+
+有关 RKE2 一般要求的更多详细信息,请参见 [RKE2 文档](https://docs.rke2.io/install/requirements)。
+
+### K3s Kubernetes
+
+下面的表格列出了[上游集群](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)中每个节点最小的 CPU 和内存要求。
+
+请注意,生产环境下的高可用安装最少需要 3 个节点。
+
+| 部署规模 | 最大集群数量 | 最大节点数量 | vCPUs | 内存 | 外部数据库(*) |
+| --------------- | ---------- | ------------ | -------| ---------| ------------------------- |
+| 小 | 150 | 1500 | 4 | 16 GB | 2 vCPUs, 8 GB + 1000 IOPS |
+| 中 | 300 | 3000 | 8 | 32 GB | 4 vCPUs, 16 GB + 2000 IOPS |
+| 大 (†) | 500 | 5000 | 16 | 64 GB | 8 vCPUs, 32 GB + 4000 IOPS |
+
+(*):外部数据库是指将 K3s 集群数据存储在[专用的外部主机](https://docs.k3s.io/datastore)上。 这是可选的。 具体要求取决于使用的外部数据库。
+
+(†):大规模的部署需要你[遵循最佳实践](../../../reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md)以获得足够的性能。
+
+有关 K3s 一般要求的更多详细信息,请参见 [K3s 文档](https://docs.k3s.io/installation/requirements)。
+
+### 托管 Kubernetes
+
+下面的表格列出了[上游集群](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)中每个节点最小的 CPU 和内存要求。
+
+请注意,生产环境下的高可用安装最少需要 3 个节点。
+
+这些要求适用于托管 Kubernetes 集群,例如 Amazon Elastic Kubernetes Service (EKS)、Azure Kubernetes Service (AKS) 或 Google Kubernetes Engine (GKE)。 它们不适用于 Rancher SaaS 解决方案,例如 [Rancher Prime Hosted](https://www.rancher.com/products/rancher)。
+
+| 部署规模 | 最大集群数量 | 最大节点数量 | vCPUs | 内存 |
+|-----------------------------|----------------------------|-------------------------|-------|-------|
+| 小 | 150 | 1500 | 4 | 16 GB |
+| 中 | 300 | 3000 | 8 | 32 GB |
+| 大 (*) | 500 | 5000 | 16 | 64 GB |
+
+(*):大规模的部署需要你[遵循最佳实践](../../../reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md)以获得足够的性能。
+
+
+### RKE
+
+下面的表格列出了[上游集群](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)中每个节点最小的 CPU 和内存要求。
+
+请注意,生产环境下的高可用安装最少需要 3 个节点。
+
+| 部署规模 | 最大集群数量 | 最大节点数量 | vCPUs | 内存 |
+|-----------------------------|----------------------------|-------------------------|-------|-------|
+| 小 | 150 | 1500 | 4 | 16 GB |
+| 中 | 300 | 3000 | 8 | 32 GB |
+| 大 (*) | 500 | 5000 | 16 | 64 GB |
+
+(*): 大规模的部署需要你[遵循最佳实践](../../../reference-guides/best-practices/rancher-server/tuning-and-best-practices-for-rancher-at-scale.md)以获得足够的性能。
+
+有关 RKE 一般要求的更多详细信息,请参见 [RKE 文档](https://rke.docs.rancher.com/os)。
+
+### Docker
+
+下面的表格列出了[上游集群](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)中每个节点最小的 CPU 和内存要求。
+
+请注意,在 Docker 中安装 Rancher 仅适用于开发或测试目的。不建议在生产环境中使用。
+
+| 部署规模 | 最大集群数量 | 最大节点数量 | vCPUs | 内存 |
+|-----------------------------|----------------------------|-------------------------|-------|------|
+| 小 | 5 | 50 | 1 | 4 GB |
+| 中 | 15 | 200 | 2 | 8 GB |
+
+## Ingress
+
+安装 Rancher 的 Kubernetes 集群中的每个节点都应该运行一个 Ingress。
+
+Ingress 需要部署为 DaemonSet 以确保负载均衡器能成功把流量转发到各个节点。
+
+如果是 RKE,RKE2 和 K3s 安装,你不需要手动安装 Ingress,因为它是默认安装的。
+
+对于托管的 Kubernetes 集群(EKS、GKE、AKS),你需要设置 Ingress。
+
+- **Amazon EKS**:[在 Amazon EKS 上安装 Rancher 以及如何安装 Ingress 以访问 Rancher Server](../install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md)。
+- **AKS**:[使用 Azure Kubernetes 服务安装 Rancher 以及如何安装 Ingress 以访问 Rancher Server](../install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md)。
+- **GKE**:[使用 GKE 安装 Rancher 以及如何安装 Ingress 以访问 Rancher Server](../install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md)。
+
+## 磁盘
+
+etcd 在集群中的性能决定了 Rancher 的性能。因此,为了获得最佳速度,我们建议使用 SSD 磁盘来支持 Rancher 管理的 Kubernetes 集群。在云提供商上,你还需使用能获得最大 IOPS 的最小大小。在较大的集群中,请考虑使用专用存储设备存储 etcd 数据和 wal 目录。
+
+## 网络要求
+
+本节描述了安装 Rancher Server 的节点的网络要求。
+
+:::caution
+
+如果包含 Rancher 的服务器带有 `X-Frame-Options=DENY` 标头,在升级旧版 UI 之后,Rancher UI 中的某些页面可能无法渲染。这是因为某些旧版页面在新 UI 中是以 iFrames 模式嵌入的。
+
+:::
+
+### 节点 IP 地址
+
+无论你是在单个节点还是高可用集群上安装 Rancher,每个节点都应配置一个静态 IP。如果使用 DHCP,则每个节点都应该有一个 DHCP 预留,以确保节点分配到相同的 IP 地址。
+
+### 端口要求
+
+为了确保能正常运行,Rancher 需要在 Rancher 节点和下游 Kubernetes 集群节点上开放一些端口。不同集群类型的 Rancher 和下游集群的所有必要端口,请参见[端口要求](port-requirements.md)。
+
+## Dockershim 支持
+
+有关 Dockershim 支持的详情,请参见[此页面](dockershim.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md
new file mode 100644
index 00000000000..0bf72d2e721
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md
@@ -0,0 +1,343 @@
+---
+title: 端口要求
+description: 了解 Rancher 正常运行所需的端口要求,包括 Rancher 节点和下游 Kubernetes 集群节点
+---
+
+import PortsIaasNodes from '@site/src/components/PortsIaasNodes'
+import PortsCustomNodes from '@site/src/components/PortsCustomNodes'
+import PortsImportedHosted from '@site/src/components/PortsImportedHosted'
+
+为了确保能正常运行,Rancher 需要在 Rancher 节点和下游 Kubernetes 集群节点上开放一些端口。
+
+## Rancher 节点
+
+下表列出了运行 Rancher Server 的节点之间需要开放的端口。
+
+不同的 Rancher Server 架构有不同的端口要求。
+
+Rancher 可以安装在任何 Kubernetes 集群上。如果你的 Rancher 安装在 K3s、RKE 或 RKE2 Kubernetes 集群上,请参考下面的标签页。对于其他 Kubernetes 发行版,请参见该发行版的文档,了解集群节点的端口要求。
+
+:::note 注意事项:
+
+- Rancher 节点可能要求额外出站访问已配置的外部验证提供程序(如 LDAP)。
+- Kubernetes 建议节点端口服务使用 TCP 30000-32767。
+- 对于防火墙,可能需要在集群和 Pod CIDR 内启用流量。
+- Rancher 节点可能还需要出站访问用于存储集群备份(如 Minio)的外部 S3 上的位置。
+
+:::
+
+### K3s 上 Rancher Server 节点的端口
+
+
+ 单击展开
+
+K3s server 需要开放端口 6443 才能供节点访问。
+
+使用 Flannel VXLAN 时,节点需要能够通过 UDP 端口 8472 访问其他节点。节点不应监听任何其他端口。K3s 使用反向隧道,建立节点与 Server 的出站连接,所有 kubelet 流量都通过该隧道进行。但是,如果你不使用 Flannel,而是使用自定义的 CNI,K3s 则不需要打开 8472 端口。
+
+如果要使用 Metrics Server,则需要在每个节点上打开端口 10250。
+
+:::note 重要提示:
+
+节点上的 VXLAN 端口会开放集群网络,让任何人均能访问集群。因此,不要将 VXLAN 端口暴露给外界。请使用禁用 8472 端口的防火墙/安全组来运行节点。
+
+:::
+
+下表描述了入站和出站流量的端口要求:
+
+Rancher Server 节点的入站规则
+
+| 协议 | 端口 | 源 | 描述 |
+|-----|-----|----------------|---|
+| TCP | 80 | 执行外部 SSL 终止的负载均衡器/代理 | 使用外部 SSL 终止时的 Rancher UI/API |
+| TCP | 443 | Server 节点、Agent 节点、托管/注册的 Kubernetes、任何需要使用 Rancher UI 或 API 的源 | Rancher Agent,Rancher UI/API,kubectl |
+| TCP | 6443 | K3s Server 节点 | Kubernetes API |
+| UDP | 8472 | K3s Server 和 Agent 节点 | 仅 Flannel VXLAN 需要 |
+| TCP | 10250 | K3s Server 和 Agent 节点 | kubelet |
+
+Rancher 节点的出站规则
+
+| 协议 | 端口 | 目标 | 描述 |
+| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- |
+| TCP | 22 | 使用 Node Driver 创建的节点的任何节点 IP | 使用 Node Driver SSH 配置节点 |
+| TCP | 443 | git.rancher.io | Rancher catalog |
+| TCP | 2376 | 使用 Node Driver 创建的节点的任何节点 IP | Docker Machine 使用的 Docker daemon TLS 端口 |
+| TCP | 6443 | 托管/导入的 Kubernetes API | Kubernetes API Server |
+
+
+
+### RKE 上 Rancher Server 节点的端口
+
+
+ 单击展开
+
+通常情况下,Rancher 安装在三个 RKE 节点上,这些节点都有 etcd、controlplane 和 worker 角色。
+
+
+
+下表描述了 Rancher 节点之间流量的端口要求:
+
+Rancher 节点的流量规则
+
+| 协议 | 端口 | 描述 |
+|-----|-----|----------------|
+| TCP | 443 | Rancher Agents |
+| TCP | 2379 | etcd 客户端请求 |
+| TCP | 2380 | etcd 对等通信 |
+| TCP | 6443 | Kubernetes apiserver |
+| TCP | 8443 | NGINX Ingress 的验证 Webhook |
+| UDP | 8472 | Canal/Flannel VXLAN 覆盖网络 |
+| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe |
+| TCP | 10250 | Metrics Server 与所有节点的通信 |
+| TCP | 10254 | Ingress controller livenessProbe/readinessProbe |
+
+下表描述了入站和出站流量的端口要求:
+
+Rancher 节点的入站规则
+
+| 协议 | 端口 | 源 | 描述 |
+|-----|-----|----------------|---|
+| TCP | 22 | RKE CLI | RKE 通过 SSH 配置节点 |
+| TCP | 80 | 负载均衡器/反向代理 | 到 Rancher UI/API 的 HTTP 流量 |
+| TCP | 443 | 负载均衡器/反向代理、所有集群节点和其他 API/UI 客户端的 IP | 到 Rancher UI/API 的 HTTPS 流量 |
+| TCP | 6443 | Kubernetes API 客户端 | 到 Kubernetes API 的 HTTPS 流量 |
+
+Rancher 节点的出站规则
+
+| 协议 | 端口 | 目标 | 描述 |
+|-----|-----|----------------|---|
+| TCP | 443 | git.rancher.io | Rancher catalog |
+| TCP | 22 | 使用 Node Driver 创建的任何节点 | Node Driver 通过 SSH 配置节点 |
+| TCP | 2376 | 使用 Node Driver 创建的任何节点 | Node Driver 使用的 Docker daemon TLS 端口 |
+| TCP | 6443 | 托管/导入的 Kubernetes API | Kubernetes API Server |
+| TCP | 提供商依赖 | 托管集群中 Kubernetes API 端点的端口 | Kubernetes API |
+
+
+
+### RKE2 上 Rancher Server 节点的端口
+
+
+ 单击展开
+
+RKE2 server 需要开放端口 6443 和 9345 才能供集群中的其他节点访问。
+
+使用 Flannel VXLAN 时,所有节点都需要能够通过 UDP 端口 8472 访问其他节点。
+
+如果要使用 Metrics Server,则需要在每个节点上打开端口 10250。
+
+:::note 重要提示:
+
+节点上的 VXLAN 端口会开放集群网络,让任何人均能访问集群。因此,不要将 VXLAN 端口暴露给外界。请使用禁用 8472 端口的防火墙/安全组来运行节点。
+
+:::
+
+RKE2 Server 节点的入站规则
+
+| 协议 | 端口 | 源 | 描述 |
+|-----|-----|----------------|---|
+| TCP | 9345 | RKE2 Server 和 Agent 节点 | 节点注册。需要在所有 Server 节点上将端口开放给集群中的所有其他节点。 |
+| TCP | 6443 | RKE2 Agent 节点 | Kubernetes API |
+| UDP | 8472 | RKE2 Server 和 Agent 节点 | 仅 Flannel VXLAN 需要 |
+| TCP | 10250 | RKE2 Server 和 Agent 节点 | kubelet |
+| TCP | 2379 | RKE2 Server 节点 | etcd 客户端端口 |
+| TCP | 2380 | RKE2 Server 节点 | etcd 对等端口 |
+| TCP | 30000-32767 | RKE2 Server 和 Agent 节点 | NodePort 端口范围。可以使用 TCP 或 UDP。 |
+| TCP | 5473 | Calico-node pod 连接到 typha pod | 使用 Calico 部署时需要 |
+| HTTP | 80 | 执行外部 SSL 终止的负载均衡器/代理 | 使用外部 SSL 终止时的 Rancher UI/API |
+| HTTPS | 443 | 托管/注册的 Kubernetes、任何需要使用 Rancher UI 或 API 的源 | Rancher Agent,Rancher UI/API,kubectl。如果负载均衡器执行 TLS 终止,则不需要。 |
+
+所有出站流量通常都是允许的。
+
+
+### Docker 安装的 Rancher Server 的端口
+
+
+ 单击展开
+
+下表描述了 Rancher 节点入站和出站流量的端口要求:
+
+Rancher 节点的入站规则
+
+| 协议 | 端口 | 源 | 描述 |
+|-----|-----|----------------|---|
+| TCP | 80 | 执行外部 SSL 终止的负载均衡器/代理 | 使用外部 SSL 终止时的 Rancher UI/API |
+| TCP | 443 | 托管/注册的 Kubernetes、任何需要使用 Rancher UI 或 API 的源 | Rancher Agent,Rancher UI/API,kubectl |
+
+Rancher 节点的出站规则
+
+| 协议 | 端口 | 源 | 描述 |
+|-----|-----|----------------|---|
+| TCP | 22 | 使用 Node Driver 创建的节点的任何节点 IP | 使用 Node Driver SSH 配置节点 |
+| TCP | 443 | git.rancher.io | Rancher catalog |
+| TCP | 2376 | 使用 Node Driver 创建的节点的任何节点 IP | Docker Machine 使用的 Docker daemon TLS 端口 |
+| TCP | 6443 | 托管/导入的 Kubernetes API | Kubernetes API Server |
+
+
+
+## 下游 Kubernetes 集群节点
+
+下游 Kubernetes 集群用于运行你的应用和服务。本节介绍了哪些端口需要在下游集群的节点上打开,以便 Rancher 能够与它们进行通信。
+
+不同的下游集群的启动方式有不同的端口要求。下面的每个标签都列出了不同[集群类型](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)所需打开的端口。
+
+下图描述了为每个[集群类型](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)打开的端口。
+
+Rancher 管理面板的端口要求
+
+
+
+:::tip
+
+如果你对安全性的关注不是太高,而且也愿意多打开几个端口,你可以参考[常用端口](#常用端口)中列出的端口,而不是参考下方的表格。
+
+:::
+
+### Harvester 集群的端口
+
+有关 Harvester 端口要求的更多信息,请参阅[此处](../../../integrations-in-rancher/harvester.md#端口要求)。
+
+
+### Rancher 使用节点池启动 Kubernetes 集群的端口
+
+
+ 单击展开
+
+下表描述了节点在[云提供商](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)中创建的情况下,[Rancher 启动 Kubernetes](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 的端口要求。
+
+:::note
+
+在 AWS EC2 或 DigitalOcean 等云提供商中创建集群期间,Rancher 会自动打开所需的端口。
+
+:::
+
+
+
+
+
+### Rancher 使用自定义节点启动 Kubernetes 集群的端口
+
+
+ 单击展开
+
+下表描述了使用[自定义节点](../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md)的情况下,[Rancher 启动 Kubernetes](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 的端口要求。
+
+
+
+
+
+### 托管 Kubernetes 集群的端口
+
+
+ 单击展开
+
+下表描述了[托管集群](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)的端口要求。
+
+
+
+
+
+### 已注册集群的端口
+
+:::note
+
+在 Rancher 2.5 之前,注册集群被称为导入集群。
+
+:::
+
+
+ 单击展开
+
+下表描述了[注册集群](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md)的端口要求。
+
+
+
+
+
+
+## 其他端口注意事项
+
+### 常用端口
+
+无论集群是什么类型,常用端口通常在你的 Kubernetes 节点上打开。
+
+import CommonPortsTable from '../../../shared-files/_common-ports-table.md';
+
+
+
+----
+
+### 本地节点流量
+
+上述要求中标记为`本地流量`(例如 `9099 TCP`)的端口会用于 Kubernetes 健康检查 (`livenessProbe` 和 `readinessProbe`)。
+这些健康检查是在节点本身执行的。在大多数云环境中,这种本地流量是默认允许的。
+
+但是,在以下情况下可能会阻止此流量:
+
+- 你已在节点上应用了严格的主机防火墙策略。
+- 你正在使用有多个接口(多宿主)的节点。
+
+在这些情况下,你必须在你的主机防火墙中主动允许这种流量,如果是公共/私有云托管的主机(例如 AWS 或 OpenStack),你需要在你的安全组配置中主动允许此流量。请记住,如果你在安全组中使用安全组作为源或目标,主动开放端口只适用于节点/实例的私有接口。
+
+### Rancher AWS EC2 安全组
+
+当你使用 [AWS EC2 Node Driver](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) 在 Rancher 中配置集群节点时,你可以让 Rancher 创建一个名为 `rancher-nodes` 的安全组。以下规则会自动添加到该安全组中。
+
+| 类型 | 协议 | 端口范围 | 源/目标 | 规则类型 |
+|-----------------|:--------:|:-----------:|------------------------|:---------:|
+| SSH | TCP | 22 | 0.0.0.0/0 | 入站 |
+| HTTP | TCP | 80 | 0.0.0.0/0 | 入站 |
+| 自定义 TCP 规则 | TCP | 443 | 0.0.0.0/0 | 入站 |
+| 自定义 TCP 规则 | TCP | 2376 | 0.0.0.0/0 | 入站 |
+| 自定义 TCP 规则 | TCP | 2379-2380 | sg-xxx (rancher-nodes) | 入站 |
+| 自定义 UDP 规则 | UDP | 4789 | sg-xxx (rancher-nodes) | 入站 |
+| 自定义 TCP 规则 | TCP | 6443 | 0.0.0.0/0 | 入站 |
+| 自定义 UDP 规则 | UDP | 8472 | sg-xxx (rancher-nodes) | 入站 |
+| 自定义 TCP 规则 | TCP | 10250-10252 | sg-xxx (rancher-nodes) | 入站 |
+| 自定义 TCP 规则 | TCP | 10256 | sg-xxx (rancher-nodes) | 入站 |
+| 自定义 TCP 规则 | TCP | 30000-32767 | 0.0.0.0/0 | 入站 |
+| 自定义 UDP 规则 | UDP | 30000-32767 | 0.0.0.0/0 | 入站 |
+| 所有流量 | 全部 | 全部 | 0.0.0.0/0 | 出站 |
+
+### 打开 SUSE Linux 端口
+
+SUSE Linux 可能有一个防火墙,默认情况下会阻止所有端口。要打开将主机添加到自定义集群所需的端口:
+
+
+
+
+1. SSH 进入实例。
+1. 以文本模式启动 YaST:
+ ```
+ sudo yast2
+ ```
+
+1. 导航到**安全和用户** > **防火墙** > **区域:公共** > **端口**。要在界面内导航,请参照[说明](https://doc.opensuse.org/documentation/leap/reference/html/book-reference/cha-yast-text.html#sec-yast-cli-navigate)。
+1. 要打开所需的端口,把它们输入到 **TCP 端口** 和 **UDP 端口** 字段。在这个例子中,端口 9796 和 10250 也被打开,用于监控。由此产生的字段应类似于以下内容:
+ ```yaml
+ TCP Ports
+ 22, 80, 443, 2376, 2379, 2380, 6443, 9099, 9796, 10250, 10254, 30000-32767
+ UDP Ports
+ 8472, 30000-32767
+ ```
+
+1. 所有必须端口都输入后,选择**接受**。
+
+
+
+
+1. SSH 进入实例。
+1. 编辑 `/etc/sysconfig/SuSEfirewall2` 并打开所需的端口。在这个例子中,端口 9796 和 10250 也被打开,用于监控。
+ ```
+ FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767"
+ FW_SERVICES_EXT_UDP="8472 30000:32767"
+ FW_ROUTE=yes
+ ```
+1. 用新的端口重启防火墙:
+ ```
+ SuSEfirewall2
+ ```
+
+
+
+
+**结果** :该节点已打开添加到自定义集群所需的端口。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md
new file mode 100644
index 00000000000..4e36f893125
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md
@@ -0,0 +1,31 @@
+---
+title: 离线 Helm CLI 安装
+---
+
+本文介绍如何使用 Helm CLI 在离线环境中安装 Rancher Server。离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
+
+Rancher 安装在 RKE Kubernetes 集群、K3s Kubernetes 集群,或单个 Docker 容器上对应的安装步骤会有所不同。
+
+如需了解各个安装方式的更多信息,请参见[本页](../../installation-and-upgrade.md)。
+
+在安装指导中,我们为不同的安装选项提供对应的 _选项卡_ 。
+
+:::note 重要提示:
+
+如果你按照 Docker 安装指南安装 Rancher,你将没有把 Docker 安装转换为 Kubernetes 安装的升级途径。
+
+:::
+
+## 安装概要
+
+1. [设置基础设施和私有镜像仓库](infrastructure-private-registry.md)
+2. [收集镜像到私有镜像仓库](publish-images.md)
+3. [设置 Kubernetes 集群(如果你使用 Docker 安装,请跳过此步骤)](install-kubernetes.md)
+4. [安装 Rancher](install-rancher-ha.md)
+
+## 升级
+
+如需在离线环境中使用 Helm CLI 升级 Rancher,请按照[升级步骤](../../install-upgrade-on-a-kubernetes-cluster/upgrades.md)进行操作。
+
+### 后续操作
+[准备节点](infrastructure-private-registry.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
new file mode 100644
index 00000000000..a639349d8cf
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md
@@ -0,0 +1,149 @@
+---
+title: Docker 安装命令
+---
+
+Docker 安装适用于想要测试 Rancher 的用户。
+
+你可以使用 `docker run`命令,把 Rancher Server 组件安装到单个节点上,而不需要运行 Kubernetes 集群。由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
+
+你可以使用备份应用,按照[这些步骤](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md),将 Rancher Server 从 Docker 安装迁移到 Kubernetes 安装。
+
+出于安全考虑,使用 Rancher 时请使用 SSL(Secure Sockets Layer)。SSL 保护所有 Rancher 网络通信(如登录和与集群交互)的安全。
+
+| 环境变量键 | 环境变量值 | 描述 |
+| -------------------------------- | -------------------------------- | ---- |
+| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `<REGISTRY.YOURDOMAIN.COM:PORT>` | 将 Rancher Server 配置成在配置集群时,始终从私有镜像仓库中拉取镜像。 |
+| `CATTLE_SYSTEM_CATALOG` | `bundled` | 配置 Rancher Server 使用打包的 Helm System Chart 副本。[system charts](https://github.com/rancher/system-charts) 仓库包含所有 Monitoring,Logging,告警和全局 DNS 等功能所需的应用商店项目。这些 [Helm Chart](https://github.com/rancher/system-charts) 位于 GitHub 中。但是由于你处在离线环境,因此使用 Rancher 内置的 Chart 会比设置 Git mirror 容易得多。 |
+
+:::note 你是否需要:
+
+- 配置自定义 CA 根证书以访问服务。参见[自定义 CA 根证书](../../resources/custom-ca-root-certificates.md)。
+- 记录所有 Rancher API 的事务。参见 [API 审计](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)。
+
+:::
+
+选择以下的选项之一:
+
+### 选项 A:使用 Rancher 默认的自签名证书
+
+
+ 单击展开
+
+如果你在不考虑身份验证的开发或测试环境中安装 Rancher,可以使用 Rancher 生成的自签名证书安装 Rancher。这种安装方式避免了自己生成证书的麻烦。
+
+登录到你的 Linux 主机,然后运行下面的安装命令。输入命令时,参考下表来替换每个占位符。
+
+| 占位符 | 描述 |
+| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 私有镜像仓库的 URL 和端口。 |
+| `<RANCHER_VERSION_TAG>` | 你想要安装的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+特权访问是[必须](./install-rancher-ha.md#rancher-特权访问)的。
+
+```
+docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ -e CATTLE_SYSTEM_CATALOG=bundled \ # 使用打包的 Rancher System Chart
+ --privileged \
+ <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG>
+```
+
+
+
+### 选项 B:使用你自己的证书 - 自签名
+
+
+ 单击展开
+
+在你团队访问 Rancher Server 的开发或测试环境中,创建一个用于你的安装的自签名证书,以便团队验证他们对实例的连接。
+
+:::note 先决条件:
+
+从能连接到互联网的计算机上,使用 [OpenSSL](https://www.openssl.org/) 或其他方法创建自签名证书。
+
+- 证书文件的格式必须是 PEM。
+- 在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](../rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+
+:::
+
+创建证书后,登录 Linux 主机,然后运行以下安装命令。输入命令时,参考下表来替换每个占位符。使用 `-v` 标志并提供证书的路径,以将证书挂载到容器中。
+
+| 占位符 | 描述 |
+| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `<CERT_DIRECTORY>` | 包含证书文件的目录的路径。 |
+| `<FULL_CHAIN.pem>` | 完整证书链的路径。 |
+| `<PRIVATE_KEY.pem>` | 证书私钥的路径。 |
+| `<CA_CERTS.pem>` | CA 证书的路径。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 私有镜像仓库的 URL 和端口。 |
+| `<RANCHER_VERSION_TAG>` | 你想要安装的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+特权访问是[必须](./install-rancher-ha.md#rancher-特权访问)的。
+
+```
+docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
+ -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
+ -v /<CERT_DIRECTORY>/<CA_CERTS.pem>:/etc/rancher/ssl/cacerts.pem \
+ -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ -e CATTLE_SYSTEM_CATALOG=bundled \ # 使用打包的 Rancher System Chart
+ --privileged \
+ <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG>
+```
+
+
+
+### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
+
+
+ 单击展开
+
+在公开暴露应用的开发或测试环境中,请使用由可信 CA 签名的证书,以避免用户收到证书安全警告。
+
+:::note 先决条件:
+
+证书文件的格式必须是 PEM。
+
+:::
+
+获取证书后,登录 Linux 主机,然后运行以下安装命令。输入命令时,参考下表来替换每个占位符。因为你的证书是由可信的 CA 签名的,因此你不需要安装额外的 CA 证书文件。
+
+| 占位符 | 描述 |
+| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `<CERT_DIRECTORY>` | 包含证书文件的目录的路径。 |
+| `<FULL_CHAIN.pem>` | 完整证书链的路径。 |
+| `<PRIVATE_KEY.pem>` | 证书私钥的路径。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 私有镜像仓库的 URL 和端口。 |
+| `<RANCHER_VERSION_TAG>` | 你想要安装的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+:::note
+
+使用 `--no-cacerts` 作为容器的参数,以禁用 Rancher 生成的默认 CA 证书。
+
+:::
+
+特权访问是[必须](./install-rancher-ha.md#rancher-特权访问)的。
+
+```
+docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ --no-cacerts \
+ -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
+ -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
+ -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ -e CATTLE_SYSTEM_CATALOG=bundled \ # 使用打包的 Rancher System Chart
+ --privileged \
+ <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG>
+```
+
+
+
+
+
+:::note
+
+如果你不想发送遥测数据,在首次登录时退出[遥测](../../../../faq/telemetry.md)。
+
+:::
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
new file mode 100644
index 00000000000..8978a639b0e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md
@@ -0,0 +1,193 @@
+---
+title: '1. 设置基础设施和私有镜像仓库'
+---
+
+本文介绍如何在离线环境中,为 Rancher Management server 配置底层基础设施。你还将设置 Rancher 节点中必须可用的私有容器镜像仓库。
+
+离线环境是 Rancher Server 离线安装或安装在防火墙后面的环境。
+
+Rancher 安装在 K3s Kubernetes 集群、RKE Kubernetes 集群还是单个 Docker 容器上,对应的基础设施设置会有所不同。如需了解各个安装方式的更多信息,请参见[本页](../../installation-and-upgrade.md)。
+
+Rancher 可以安装在任何 Kubernetes 集群上。为了阅读方便,我们在下文中仍提供了 RKE 和 K3s Kubernetes 基础设施教程。
+
+
+
+
+为了实现高可用安装,我们建议设置以下的基础设施:
+
+- **2 个 Linux 节点**:可以是你的云提供商中的虚拟机。
+- **1 个外部数据库**:用于存储集群数据。支持 PostgreSQL, MySQL 和 etcd。
+- **1 个负载均衡器**:用于将流量转发到这两个节点中。
+- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
+- **私有镜像仓库**,用于将容器镜像分发到你的主机。
+
+### 1. 配置 Linux 节点
+
+这些主机会断开互联网连接,但需要能与你的私有镜像仓库连接。
+
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../installation-requirements/installation-requirements.md)的常规要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置外部数据库
+
+K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的数据库来运行 Kubernetes。该功能让 Kubernetes 运维更加灵活。你可以根据实际情况选择合适的数据库。
+
+对于 K3s 高可用安装,你需要配置以下的其中一个数据库:
+
+* [PostgreSQL](https://www.postgresql.org/)(10.7 和 11.5 已验证)
+* [MySQL](https://www.mysql.com/)(5.7 已验证)
+* [etcd](https://etcd.io/)(3.3.15 已验证)
+
+在安装 Kubernetes 时,你需要传入 K3s 连接数据库的详细信息。
+
+如需获取配置数据库示例,请参见[在 Amazon RDS 服务中配置 MySQL 数据库](../../../../how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md)的教程。
+
+如需获取配置 K3s 集群数据库的所有可用选项,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
+
+### 3. 配置负载均衡器
+
+你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
+
+在后续步骤中配置 Kubernetes 时,K3s 工具会部署一个 Traefik Ingress Controller。该 Controller 将侦听 worker 节点的 80 端口和 443 端口,以响应发送给特定主机名的流量。
+
+在安装 Rancher 后(也是在后续步骤中),Rancher 系统将创建一个 Ingress 资源。该 Ingress 通知 Traefik Ingress Controller 监听发往 Rancher 主机名的流量。Traefik Ingress Controller 在收到发往 Rancher 主机名的流量时,会将其转发到集群中正在运行的 Rancher Server Pod。
+
+在你的实现中,你可以考虑是否需要使用 4 层或 7 层的负载均衡器:
+
+- **4 层负载均衡器**:两种选择中较为简单的一种,它将 TCP 流量转发到你的节点中。我们建议使用 4 层负载均衡器,将流量从 TCP/80 端口和 TCP/443 端口转发到 Rancher Management 集群节点上。集群上的 Ingress Controller 会将 HTTP 流量重定向到 HTTPS,并在 TCP/443 端口上终止 SSL/TLS。Ingress Controller 会将流量转发到 Rancher deployment 中 Ingress Pod 的 TCP/80 端口。
+- **7 层负载均衡器**:相对比较复杂,但功能更全面。例如,与 Rancher 本身进行 TLS 终止相反,7 层负载均衡器能够在负载均衡器处处理 TLS 终止。如果你需要集中在基础设施中进行 TLS 终止,7 层负载均衡可能会很适合你。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。如果你选择在 7 层负载均衡器上终止 SSL/TLS 流量,则在安装 Rancher 时(后续步骤)需要使用 `--set tls=external` 选项。详情请参见 [Rancher Helm Chart 选项](../../installation-references/helm-chart-options.md#外部-tls-终止)。
+
+如需获取配置 NGINX 负载均衡器的示例,请参见[本页](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md)。
+
+如需获取如何配置 Amazon ELB 网络负载均衡器的指南,请参见[本页](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md)。
+
+:::note 重要提示:
+
+安装后,请勿将此负载均衡(例如 `local` 集群 Ingress)用于 Rancher 以外的应用。如果此 Ingress 与其他应用共享,在其他应用的 Ingress 配置重新加载后,可能导致 Rancher 出现 websocket 错误。我们建议把 `local` 集群专用给 Rancher,不要在集群内部署其他应用。
+
+:::
+
+### 4. 配置 DNS 记录
+
+配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
+
+根据你的环境,DNS 记录可以是指向负载均衡器 IP 的 A 记录,也可以是指向负载均衡器主机名的 CNAME。无论是哪种情况,请确保该记录是你要 Rancher 进行响应的主机名。
+
+在安装 Rancher 时(后续步骤),你需要指定此主机名。请知悉,此主机名无法修改。请确保你设置的主机名是你想要的。
+
+有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
+
+### 5. 配置私有镜像仓库
+
+Rancher 支持使用私有镜像仓库进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
+
+在后续设置 K3s Kubernetes 集群时,你需要创建一个[私有镜像仓库配置文件](https://rancher.com/docs/k3s/latest/en/installation/private-registry/),其中包含此镜像仓库的信息。
+
+如果你需要创建私有镜像仓库,请参阅相应运行时的文档:
+
+* [Containerd](https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration).
+ * [Nerdctl 命令和镜像仓库托管服务](https://github.com/containerd/nerdctl/blob/main/docs/registry.md)
+* [Docker](https://docs.docker.com/registry/deploying/).
+
+
+
+
+如需在高可用 RKE 集群中安装 Rancher Management Server,我们建议配置以下基础设施:
+
+- **3 个 Linux 节点**:可以是你的云提供商(例如 Amazon EC2,GCE 或 vSphere)中的虚拟机。
+- **1 个负载均衡器**:用于将前端流量转发到这三个节点中。
+- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
+- **私有镜像仓库**,用于将容器镜像分发到你的主机。
+
+这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
+
+### 为什么使用三个节点?
+
+在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
+
+为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
+
+### 1. 配置 Linux 节点
+
+这些主机会断开互联网连接,但需要能与你的私有镜像仓库连接。
+
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置负载均衡器
+
+你还需要设置一个负载均衡器,来将流量重定向到三个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
+
+在后续步骤中配置 Kubernetes 时,RKE 工具会部署一个 NGINX Ingress Controller。该 Controller 将侦听 worker 节点的 80 端口和 443 端口,以响应发送给特定主机名的流量。
+
+在安装 Rancher 后(也是在后续步骤中),Rancher 系统将创建一个 Ingress 资源。该 Ingress 通知 NGINX Ingress Controller 监听发往 Rancher 主机名的流量。NGINX Ingress Controller 在收到发往 Rancher 主机名的流量时,会将其转发到集群中正在运行的 Rancher Server Pod。
+
+在你的实现中,你可以考虑是否需要使用 4 层或 7 层的负载均衡器:
+
+- **4 层负载均衡器**:两种选择中较为简单的一种,它将 TCP 流量转发到你的节点中。我们建议使用 4 层负载均衡器,将流量从 TCP/80 端口和 TCP/443 端口转发到 Rancher Management 集群节点上。集群上的 Ingress Controller 会将 HTTP 流量重定向到 HTTPS,并在 TCP/443 端口上终止 SSL/TLS。Ingress Controller 会将流量转发到 Rancher deployment 中 Ingress Pod 的 TCP/80 端口。
+- **7 层负载均衡器**:相对比较复杂,但功能更全面。例如,与 Rancher 本身进行 TLS 终止相反,7 层负载均衡器能够在负载均衡器处处理 TLS 终止。如果你需要集中在基础设施中进行 TLS 终止,7 层负载均衡可能会很适合你。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。如果你选择在 7 层负载均衡器上终止 SSL/TLS 流量,则在安装 Rancher 时(后续步骤)需要使用 `--set tls=external` 选项。详情请参见 [Rancher Helm Chart 选项](../../installation-references/helm-chart-options.md#外部-tls-终止)。
+
+如需获取配置 NGINX 负载均衡器的示例,请参见[本页](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md)。
+
+如需获取如何配置 Amazon ELB 网络负载均衡器的指南,请参见[本页](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md)。
+
+:::caution
+
+安装后,请勿将此负载均衡(例如 `local` 集群 Ingress)用于 Rancher 以外的应用。如果此 Ingress 与其他应用共享,在其他应用的 Ingress 配置重新加载后,可能导致 Rancher 出现 websocket 错误。我们建议把 `local` 集群专用给 Rancher,不要在集群内部署其他应用。
+
+:::
+
+### 3. 配置 DNS 记录
+
+配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
+
+根据你的环境,DNS 记录可以是指向负载均衡器 IP 的 A 记录,也可以是指向负载均衡器主机名的 CNAME。无论是哪种情况,请确保该记录是你要 Rancher 进行响应的主机名。
+
+在安装 Rancher 时(后续步骤),你需要指定此主机名。请知悉,此主机名无法修改。请确保你设置的主机名是你想要的。
+
+有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
+
+### 4. 配置私有镜像仓库
+
+Rancher 支持使用安全的私有镜像仓库进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
+
+在后续设置 RKE Kubernetes 集群时,你需要创建一个[私有镜像仓库配置文件](https://rancher.com/docs/rke/latest/en/config-options/private-registries/),其中包含此镜像仓库的信息。
+
+如果你需要创建私有镜像仓库,请参阅相应运行时的文档:
+
+* [Containerd](https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration).
+ * [Nerdctl 命令和镜像仓库托管服务](https://github.com/containerd/nerdctl/blob/main/docs/registry.md)
+* [Docker](https://docs.docker.com/registry/deploying/).
+
+
+
+
+:::note 注意事项:
+
+- Docker 安装适用于想要测试 Rancher 的用户。由于只有一个节点和一个 Docker 容器,因此如果该节点发生故障,你将丢失 Rancher Server 的所有数据。
+
+- Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
+
+:::
+
+### 1. 配置 Linux 节点
+
+此主机会断开互联网连接,但需要能与你的私有镜像仓库连接。
+
+请确保你的节点满足[操作系统,容器,硬件和网络](../../../../pages-for-subheaders/installation-requirements.md)的常规安装要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置私有 Docker 镜像仓库
+
+Rancher 支持使用私有镜像仓库在堡垒服务器中进行离线安装。你必须有自己的私有镜像仓库或使用其他方式将容器镜像分发到主机。
+
+如需获得创建私有镜像仓库的帮助,请参见 [Docker 官方文档](https://docs.docker.com/registry/)。
+
+
+
+
+### 后续操作
+[收集镜像并发布到你的私有镜像仓库](publish-images.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md
new file mode 100644
index 00000000000..3fcd8a992cf
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md
@@ -0,0 +1,387 @@
+---
+title: '3. 安装 Kubernetes(Docker 安装请跳过)'
+---
+
+:::note
+
+如果你使用 Docker 在单个节点上安装 Rancher,请跳过本节。
+
+:::
+
+本文描述了如何根据 [Rancher Server 环境的最佳实践](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#kubernetes-安装环境)来安装 Kubernetes 集群。该集群需要专用于运行 Rancher Server。
+
+Rancher 可以安装在任何 Kubernetes 集群上,包括托管的 Kubernetes。
+
+在 RKE、RKE2 或 K3s 上离线安装 Kubernetes 集群的步骤如下所示:
+
+
+
+
+在本指南中,我们假设你已经在离线环境中创建了节点,并且在堡垒服务器上有一个安全的 Docker 私有镜像仓库。
+
+### 安装概要
+
+1. [准备镜像目录](#1-准备镜像目录)
+2. [创建镜像仓库 YAML](#2-创建镜像仓库-yaml)
+3. [安装 K3s](#3-安装-k3s)
+4. [保存并开始使用 kubeconfig 文件](#4-保存并开始使用-kubeconfig-文件)
+
+### 1. 准备镜像目录
+从 [Releases](https://github.com/k3s-io/k3s/releases) 页面获取要运行的 K3s 版本的镜像 tar 文件。
+
+在每个节点上启动 K3s 之前,将这个 tar 文件放在 `images` 目录中,例如:
+
+```sh
+sudo mkdir -p /var/lib/rancher/k3s/agent/images/
+sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/
+```
+
+### 2. 创建镜像仓库 YAML
+把 `registries.yaml` 文件创建到 `/etc/rancher/k3s/registries.yaml` 中。此文件为 K3s 提供连接到你的私有镜像仓库的详细信息。
+
+在加入必要信息之前,`registries.yaml` 文件是这样的:
+
+```yaml
+---
+mirrors:
+ customreg:
+ endpoint:
+ - "https://ip-to-server:5000"
+configs:
+ customreg:
+ auth:
+ username: xxxxxx # 镜像仓库的用户名
+ password: xxxxxx # 镜像仓库的密码
+ tls:
+ cert_file: <镜像仓库所用的证书文件路径>
+ key_file: <镜像仓库所用的密钥文件路径>
+ ca_file: <镜像仓库所用的 CA 文件路径>
+```
+
+请注意,目前,K3s 仅支持安全的镜像仓库(带有自定义 CA 的 SSL)。
+
+有关 K3s 的私有镜像仓库配置文件的详情,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/private-registry/)。
+
+### 3. 安装 K3s
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/)。
+
+如需指定 K3s(Kubernetes)版本,在运行 K3s 安装脚本时使用 `INSTALL_K3S_VERSION` 环境变量(例如 `INSTALL_K3S_VERSION="v1.24.10+k3s1"`)。
+
+从 [Releases](https://github.com/k3s-io/k3s/releases) 页面获取 K3s 的二进制文件,该文件要匹配用于获取离线镜像的 tar 版本。
+访问 [K3s 安装脚本](https://get.k3s.io)以获取 K3s 的安装脚本。
+
+将二进制文件放到每个节点的 `/usr/local/bin` 中。
+将安装脚本放在每个节点的任意位置,并将脚本命名为 `install.sh`。
+
+在每个 Server 上安装 K3s:
+
+```
+INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_VERSION=<VERSION> ./install.sh
+```
+
+在每个 Agent 上安装 K3s:
+
+```
+INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_VERSION=<VERSION> K3S_URL=https://<SERVER_IP>:6443 K3S_TOKEN=<TOKEN> ./install.sh
+```
+
+其中 `<SERVER_IP>` 是 Server 的 IP 或有效 DNS,`<TOKEN>` 是可以在 `/var/lib/rancher/k3s/server/node-token` 中找到的 Server node-token。
+
+:::note
+
+K3s 自动为 kubelets 提供 `--resolv-conf` 标志,该标志可能对在离线环境中配置 DNS 有帮助。
+
+:::
+
+### 4. 保存并开始使用 kubeconfig 文件
+
+在每个 Rancher Server 节点安装 K3s 时,会在每个节点的 `/etc/rancher/k3s/k3s.yaml` 中生成一个 `kubeconfig` 文件。该文件包含访问集群的凭证。请将该文件保存在安全的位置。
+
+如要使用该 `kubeconfig` 文件:
+
+1. 安装 Kubernetes 命令行工具 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl)。
+2. 复制 `/etc/rancher/k3s/k3s.yaml` 文件并保存到本地主机的 `~/.kube/config` 目录上。
+3. 在 kubeconfig 文件中,`server` 的参数为 localhost。你需要将 `server` 配置为负载均衡器的 DNS,并指定端口 6443(通过端口 6443 访问 Kubernetes API Server,通过端口 80 和 443 访问 Rancher Server)。以下是一个 `k3s.yaml` 示例:
+
+```yaml
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: [CERTIFICATE-DATA]
+ server: [LOAD-BALANCER-DNS]:6443 # 编辑此行
+ name: default
+contexts:
+- context:
+ cluster: default
+ user: default
+ name: default
+current-context: default
+kind: Config
+preferences: {}
+users:
+- name: default
+ user:
+ password: [PASSWORD]
+ username: admin
+```
+
+**结果**:你可以开始使用 `kubectl` 来管理你的 K3s 集群。如果你有多个 `kubeconfig` 文件,在使用 `kubectl` 时,你可以传入文件路径来指定要使用的 `kubeconfig` 文件:
+
+```
+kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces
+```
+
+有关 `kubeconfig` 文件的详情,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/cluster-access/) 或 [ Kubernetes 官方文档](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)中关于使用 `kubeconfig` 文件管理集群访问的部分。
+
+### 升级注意事项
+
+你可以通过以下方式完成离线环境的升级:
+
+1. 从 [Releases](https://github.com/k3s-io/k3s/releases) 页面下载要升级的 K3s 版本的新离线镜像 tar 包。将 tar 文件放在每个节点上的 `/var/lib/rancher/k3s/agent/images/` 目录中。删除旧的 tar 文件。
+2. 复制并替换每个节点上 `/usr/local/bin` 中的旧 K3s 二进制文件。复制 [K3s 安装脚本](https://get.k3s.io)(因为脚本可能自上次版本发布以来已更改)。使用相同的环境变量再次运行脚本。
+3. 重启 K3s 服务(如果安装程序没有自动重启 K3s 的话)。
+
+
+
+
+在本指南中,我们假设你已经在离线环境中创建了节点,并且在堡垒服务器上有一个安全的 Docker 私有镜像仓库。
+
+### 安装概要
+
+1. [创建 RKE2 配置](#1-创建-rke2-配置)
+2. [创建镜像仓库 YAML](#2-创建镜像仓库-yaml)
+3. [安装 RKE2](#3-安装-rke2)
+4. [保存并开始使用 kubeconfig 文件](#4-保存并开始使用-kubeconfig-文件)
+
+### 1. 创建 RKE2 配置
+把 config.yaml 文件创建到 `/etc/rancher/rke2/config.yaml` 中。这将包含创建高可用 RKE2 集群所需的所有配置选项。
+
+第一台服务器的最低配置是:
+
+```
+token: my-shared-secret
+tls-san:
+ - loadbalancer-dns-domain.com
+```
+
+其他服务器的配置文件应该包含相同的令牌,并让 RKE2 知道要连接到现有的第一台服务器:
+
+```
+server: https://ip-of-first-server:9345
+token: my-shared-secret
+tls-san:
+ - loadbalancer-dns-domain.com
+```
+
+有关详细信息,请参阅 [RKE2 文档](https://docs.rke2.io/install/ha)。
+
+:::note
+
+RKE2 自动为 kubelets 提供 `resolv-conf` 选项,该标志可能对在离线环境中配置 DNS 有帮助。
+
+:::
+
+### 2. 创建镜像仓库 YAML
+把 `registries.yaml` 文件创建到 `/etc/rancher/rke2/registries.yaml` 中。此文件为 RKE2 提供连接到你的私有镜像仓库的详细信息。
+
+在加入必要信息之前,`registries.yaml` 文件是这样的:
+
+```
+---
+mirrors:
+ customreg:
+ endpoint:
+ - "https://ip-to-server:5000"
+configs:
+ customreg:
+ auth:
+ username: xxxxxx # 镜像仓库的用户名
+ password: xxxxxx # 镜像仓库的密码
+ tls:
+ cert_file: <镜像仓库所用的证书文件路径>
+ key_file: <镜像仓库所用的密钥文件路径>
+ ca_file: <镜像仓库所用的 CA 文件路径>
+```
+
+有关 RKE2 的私有镜像仓库配置文件的详情,请参见 [RKE2 官方文档](https://docs.rke2.io/install/containerd_registry_configuration)。
+
+### 3. 安装 RKE2
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见[支持维护条款](https://rancher.com/support-maintenance-terms/)。
+
+从 Release 页面下载安装脚本、rke2、rke2-images 和 sha256sum 存档,并将它们上传到每个服务器上的目录中:
+
+```
+mkdir /tmp/rke2-artifacts && cd /tmp/rke2-artifacts/
+wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/rke2-images.linux-amd64.tar.zst
+wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/rke2.linux-amd64.tar.gz
+wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/sha256sum-amd64.txt
+curl -sfL https://get.rke2.io --output install.sh
+```
+
+接下来,使用每个服务器上的目录运行 install.sh,如下例所示:
+
+```
+INSTALL_RKE2_ARTIFACT_PATH=/tmp/rke2-artifacts sh install.sh
+```
+
+然后在所有服务器上启用并启动该服务:
+
+```
+systemctl enable rke2-server.service
+systemctl start rke2-server.service
+```
+
+有关详细信息,请参阅 [RKE2 文档](https://docs.rke2.io/install/airgap)。
+
+### 4. 保存并开始使用 kubeconfig 文件
+
+在每个 Rancher Server 节点安装 RKE2 时,会在每个节点的 `/etc/rancher/rke2/rke2.yaml` 中生成一个 `kubeconfig` 文件。该文件包含访问集群的凭证。请将该文件保存在安全的位置。
+
+如要使用该 `kubeconfig` 文件:
+
+1. 安装 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl)(Kubernetes 命令行工具)。
+2. 复制 `/etc/rancher/rke2/rke2.yaml` 文件并保存到本地主机的 `~/.kube/config` 目录上。
+3. 在 kubeconfig 文件中,`server` 的参数为 localhost。你需要将 `server` 配置为负载均衡器的 DNS,并指定端口 6443(通过端口 6443 访问 Kubernetes API Server,通过端口 80 和 443 访问 Rancher Server)。以下是一个 `rke2.yaml` 示例:
+
+```
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: [CERTIFICATE-DATA]
+ server: [LOAD-BALANCER-DNS]:6443 # 编辑此行
+ name: default
+contexts:
+- context:
+ cluster: default
+ user: default
+ name: default
+current-context: default
+kind: Config
+preferences: {}
+users:
+- name: default
+ user:
+ password: [PASSWORD]
+ username: admin
+```
+
+**结果**:你可以开始使用 `kubectl` 来管理你的 RKE2 集群。如果你有多个 `kubeconfig` 文件,在使用 `kubectl` 时,你可以传入文件路径来指定要使用的 `kubeconfig` 文件:
+
+```
+kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces
+```
+
+有关 `kubeconfig` 文件的详情,请参见 [RKE2 官方文档](https://docs.rke2.io/cluster_access)或 [ Kubernetes 官方文档](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)中关于使用 `kubeconfig` 文件管理集群访问的部分。
+
+### 升级注意事项
+
+你可以通过以下方式完成离线环境的升级:
+
+1. 从 [Releases](https://github.com/rancher/rke2/releases) 页面下载新的离线工件,并安装升级 RKE2 版本的脚本。
+2. 使用相同的环境变量再次运行脚本。
+3. 重启 RKE2 服务。
+
+
+
+我们将使用 Rancher Kubernetes Engine (RKE) 创建一个 Kubernetes 集群。在启动 Kubernetes 集群之前,你需要安装 RKE 并创建 RKE 配置文件。
+
+### 1. 安装 RKE
+
+参照 [RKE 官方文档](https://rancher.com/docs/rke/latest/en/installation/)的说明安装 RKE。
+
+:::note
+
+你可以在 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/)中找到基于 Rancher 版本的 RKE 认证版本。
+
+:::
+
+### 2. 创建 RKE 配置文件
+
+在可访问你 Linux 主机节点上的 22/TCP 端口和 6443/TCP 端口的系统上,使用以下示例创建一个名为 `rancher-cluster.yml` 的新文件。
+
+该文件是 RKE 配置文件,用于配置你要部署 Rancher 的集群。
+
+参考下方的 _RKE 选项_ 表格,修改代码示例中的参数。使用你创建的三个节点的 IP 地址或 DNS 名称。
+
+:::tip
+
+如需获取可用选项的详情,请参见 RKE [配置选项](https://rancher.com/docs/rke/latest/en/config-options/)。
+
+:::
+
+RKE 选项
+
+| 选项 | 必填 | 描述 |
+| ------------------ | -------------------- | --------------------------------------------------------------------------------------- |
+| `address` | ✓ | 离线环境中节点的 DNS 或 IP 地址 |
+| `user` | ✓ | 可运行 Docker 命令的用户 |
+| `role` | ✓ | 分配给节点的 Kubernetes 角色列表 |
+| `internal_address` | 可选1 | 用于集群内部流量的 DNS 或 IP 地址 |
+| `ssh_key_path` | | 用来验证节点的 SSH 私钥文件路径(默认值为 `~/.ssh/id_rsa`) |
+
+> 1 如果你想使用自引用安全组或防火墙,某些服务(如 AWS EC2)要求设置 `internal_address`。
+
+```yaml
+nodes:
+ - address: 10.10.3.187 # 离线环境节点 IP
+ internal_address: 172.31.7.22 # 节点内网 IP
+ user: rancher
+ role: ['controlplane', 'etcd', 'worker']
+ ssh_key_path: /home/user/.ssh/id_rsa
+ - address: 10.10.3.254 # 离线环境节点 IP
+ internal_address: 172.31.13.132 # 节点内网 IP
+ user: rancher
+ role: ['controlplane', 'etcd', 'worker']
+ ssh_key_path: /home/user/.ssh/id_rsa
+ - address: 10.10.3.89 # 离线环境节点 IP
+ internal_address: 172.31.3.216 # 节点内网 IP
+ user: rancher
+ role: ['controlplane', 'etcd', 'worker']
+ ssh_key_path: /home/user/.ssh/id_rsa
+
+private_registries:
+ - url: <REGISTRY.YOURDOMAIN.COM:PORT> # 私有镜像仓库 URL
+ user: rancher
+ password: '*********'
+ is_default: true
+```
+
+### 3. 运行 RKE
+
+配置 `rancher-cluster.yml`后,启动你的 Kubernetes 集群:
+
+```
+rke up --config ./rancher-cluster.yml
+```
+
+### 4. 保存你的文件
+
+:::note 重要提示:
+
+维护、排除问题和升级集群需要用到以下文件,请妥善保管这些文件:
+
+:::
+
+将以下文件的副本保存在安全位置:
+
+- `rancher-cluster.yml`:RKE 集群配置文件。
+- `kube_config_cluster.yml`:集群的 [Kubeconfig 文件](https://rancher.com/docs/rke/latest/en/kubeconfig/)。该文件包含可完全访问集群的凭证。
+- `rancher-cluster.rkestate`:[Kubernetes 集群状态文件](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state)。该文件包含集群的当前状态,包括 RKE 配置以及证书。 _Kubernetes 集群状态文件仅在使用 RKE 0.2.0 或更高版本时创建。_
+
+
+
+
+:::note
+
+后两个文件名中的 `rancher-cluster` 部分取决于你命名 RKE 集群配置文件的方式。
+
+:::
+
+### 故障排除
+
+参见[故障排除](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)页面。
+
+### 后续操作
+[安装 Rancher](install-rancher-ha.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
new file mode 100644
index 00000000000..d28b59ed745
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md
@@ -0,0 +1,246 @@
+---
+title: 4. 安装 Rancher
+---
+
+本文介绍如何在高可用 Kubernetes 安装的离线环境部署 Rancher。离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
+
+### Rancher 特权访问
+
+当 Rancher Server 部署在 Docker 容器中时,容器内会安装一个本地 Kubernetes 集群供 Rancher 使用。因为 Rancher 的很多功能都是以 deployment 的方式运行的,而在容器内运行容器是需要特权模式的,因此你需要在安装 Rancher 时添加 `--privileged` 选项。
+
+## Docker 说明
+
+如果你想使用 Docker 命令进行离线安装,请跳过本页的剩余部分,并按照[此页](docker-install-commands.md)进行操作。
+
+## Kubernetes 说明
+
+我们建议在 Kubernetes 集群上安装 Rancher。高可用的 Kubernetes 安装的情况下,一个 Kubernetes 集群包含三个运行 Rancher Server 组件的节点。持久层(etcd)也在这三个节点上进行复制,以便在其中一个节点发生故障时提供冗余和数据复制。
+
+### 1. 添加 Helm Chart 仓库
+
+从可以访问互联网的系统中,获取最新的 Helm Chart,然后将 manifest 复制到可访问 Rancher Server 集群的系统中。
+
+1. 如果你还没有安装 `helm`,请在可访问互联网的工作站上进行本地安装。注意:参考 [Helm 版本要求](../../resources/helm-version-requirements.md)选择 Helm 版本来安装 Rancher。
+
+2. 执行 `helm repo add` 命令,以添加包含安装 Rancher 的 Chart 的 Helm Chart 仓库。有关如何选择仓库,以及哪个仓库最适合你的用例,请参见[选择 Rancher 版本](../../resources/choose-a-rancher-version.md)。
+ - Latest:建议用于试用最新功能
+ ```
+ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+ ```
+ - Stable:建议用于生产环境
+ ```
+ helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+ ```
+ - Alpha:即将发布的实验性预览。
+ ```
+ helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha
+ ```
+ 注意:不支持升级到 Alpha 版、从 Alpha 版升级或在 Alpha 版之间升级。
+
+3. 获取最新的 Rancher Chart。此操作将获取 Chart 并将其作为 `.tgz` 文件保存在当前目录中。
+ ```plain
+ helm fetch rancher-<CHART_REPO>/rancher
+ ```
+
+ 如需下载特定的 Rancher 版本,你可以用 Helm `--version` 参数指定版本,如下:
+ ```plain
+ helm fetch rancher-stable/rancher --version=v2.4.8
+ ```
+
+### 2. 选择 SSL 配置
+
+Rancher Server 默认设计为安全的,并且需要 SSL/TLS 配置。
+
+如果你在离线的 Kubernetes 集群中安装 Rancher,我们建议使用以下两种证书生成方式。
+
+:::note
+
+如果你想在外部终止 SSL/TLS,请参见[外部负载均衡器的 TLS 终止](../../installation-references/helm-chart-options.md#外部-tls-终止)。
+
+:::
+
+| 配置 | Chart 选项 | 描述 | 是否需要 cert-manager |
+| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- |
+| Rancher 生成的自签名证书 | `ingress.tls.source=rancher` | 使用 Rancher 生成的 CA 签发的自签名证书。此项是**默认选项**。在渲染 Helm 模板的时候不需要传递此项。 | 是 |
+| 你已有的证书 | `ingress.tls.source=secret` | 通过创建 Kubernetes 密文使用你自己的证书文件。 在渲染 Rancher Helm 模板时必须传递此选项。 | 否 |
+
+### 离线安装的 Helm Chart 选项
+
+在配置 Rancher Helm 模板时,Helm Chart 中有几个专为离线安装设计的选项,如下表:
+
+| Chart 选项 | Chart 值 | 描述 |
+| ----------------------- | -------------------------------- | ---- |
+| `certmanager.version` | `<CERT_MANAGER_VERSION>` | 根据运行的 cert-manager 版本配置适当的 Rancher TLS 颁发者。 |
+| `systemDefaultRegistry` | `<REGISTRY.YOURDOMAIN.COM:PORT>` | 将 Rancher Server 配置成在配置集群时,始终从私有镜像仓库中拉取镜像。 |
+| `useBundledSystemChart` | `true` | 配置 Rancher Server 使用打包的 Helm System Chart 副本。[system charts](https://github.com/rancher/system-charts) 仓库包含所有 Monitoring,Logging,告警和全局 DNS 等功能所需的应用商店项目。这些 [Helm Chart](https://github.com/rancher/system-charts) 位于 GitHub 中。但是由于你处在离线环境,因此使用 Rancher 内置的 Chart 会比设置 Git mirror 容易得多。 |
+
+### 3. 获取 Cert-Manager Chart
+
+根据你在[2:选择 SSL 配置](#2-选择-ssl-配置)中的选择,完成以下步骤之一:
+
+#### 选项 A:使用 Rancher 默认的自签名证书
+
+默认情况下,Rancher 会生成一个 CA 并使用 cert-manager 颁发证书以访问 Rancher Server 界面。
+
+:::note
+
+由于 cert-manager 的最新改动,你需要升级 cert-manager 版本。如果你需要升级 Rancher 并使用低于 0.11.0 的 cert-manager 版本,请参见 [cert-manager 升级文档](../../resources/upgrade-cert-manager.md)。
+
+:::
+
+##### 1. 添加 cert-manager 仓库
+
+在可以连接互联网的系统中,将 cert-manager 仓库添加到 Helm:
+
+```plain
+helm repo add jetstack https://charts.jetstack.io
+helm repo update
+```
+
+##### 2. 获取 cert-manager Chart
+
+从 [Helm Chart 仓库](https://artifacthub.io/packages/helm/cert-manager/cert-manager)中获取最新可用的 cert-manager Chart:
+
+```plain
+helm fetch jetstack/cert-manager
+```
+
+##### 3. 检索 Cert-Manager CRD
+
+为 cert-manager 下载所需的 CRD 文件:
+```plain
+curl -L -o cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/releases/download/<CERT_MANAGER_VERSION>/cert-manager.crds.yaml
+```
+
+### 4. 安装 Rancher
+
+将获取的 Chart 复制到有权访问 Rancher Server 集群的系统以完成安装。
+
+##### 1. 安装 Cert-Manager
+
+使用要用于安装 Chart 的选项来安装 cert-manager。记住要设置 `image.repository` 选项,以从你的私有镜像仓库拉取镜像。此操作会创建一个包含 Kubernetes manifest 文件的 `cert-manager` 目录。
+
+:::note
+
+要查看自定义 cert-manager 安装的选项(包括集群使用 PodSecurityPolicies 的情况),请参阅 [cert-manager 文档](https://artifacthub.io/packages/helm/cert-manager/cert-manager#configuration)。
+
+:::
+
+
+ 单击展开
+
+如果你使用自签名证书,安装 cert-manager:
+
+1. 为 cert-manager 创建命名空间:
+
+ ```plain
+ kubectl create namespace cert-manager
+ ```
+
+2. 创建 cert-manager CustomResourceDefinition (CRD)。
+
+ ```plain
+ kubectl apply -f cert-manager/cert-manager-crd.yaml
+ ```
+
+3. 安装 cert-manager。
+
+ ```plain
+ helm install cert-manager ./cert-manager-<VERSION>.tgz \
+ --namespace cert-manager \
+ --set image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-controller \
+ --set webhook.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-webhook \
+ --set cainjector.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-cainjector \
+ --set startupapicheck.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-ctl
+ ```
+
+
+
+##### 2. 安装 Rancher
+首先,参见[添加 TLS 密文](../../resources/add-tls-secrets.md)发布证书文件,以便 Rancher 和 Ingress Controller 可以使用它们。
+
+然后,使用 kubectl 为 Rancher 创建命名空间:
+
+```plain
+kubectl create namespace cattle-system
+```
+
+然后安装 Rancher,并声明你选择的选项。参考下表来替换每个占位符。Rancher 需要配置为使用私有镜像仓库,以便配置所有 Rancher 启动的 Kubernetes 集群或 Rancher 工具。
+
+对于 Kubernetes v1.25 或更高版本,使用 Rancher v2.7.2-v2.7.4 时,将 `global.cattle.psp.enabled` 设置为 `false`。对于 Rancher v2.7.5 及更高版本来说,这不是必需的,但你仍然可以手动设置该选项。
+
+| 占位符 | 描述 |
+------------|-------------
+| `<VERSION>` | 输出压缩包的版本号。 |
+| `<RANCHER.YOURDOMAIN.COM>` | 指向负载均衡器的 DNS 名称。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 你的私有镜像仓库的 DNS 名称。 |
+| `<CERT_MANAGER_VERSION>` | 在 K8s 集群上运行的 cert-manager 版本。 |
+
+```plain
+ helm install rancher ./rancher-<VERSION>.tgz \
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set certmanager.version=<CERT_MANAGER_VERSION> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ --set useBundledSystemChart=true # 使用打包的 Rancher System Chart
+```
+
+**可选**:如需安装特定的 Rancher 版本,设置`rancherImageTag` 的值,例如:`--set rancherImageTag=v2.5.8`
+
+#### 选项 B:使用 Kubernetes 密文从文件中获取证书
+
+##### 1. 创建密文
+
+使用你自己的证书来创建 Kubernetes 密文,以供 Rancher 使用。证书的 common name 需要与以下命令中的 `hostname` 选项匹配,否则 Ingress Controller 将无法为 Rancher 配置站点。
+
+##### 2. 安装 Rancher
+
+安装 Rancher,并声明你选择的选项。参考下表来替换每个占位符。Rancher 需要配置为使用私有镜像仓库,以便配置所有 Rancher 启动的 Kubernetes 集群或 Rancher 工具。
+
+对于 Kubernetes v1.25 或更高版本,使用 Rancher v2.7.2-v2.7.4 时,将 `global.cattle.psp.enabled` 设置为 `false`。对于 Rancher v2.7.5 及更高版本来说,这不是必需的,但你仍然可以手动设置该选项。
+
+| 占位符 | 描述 |
+| -------------------------------- | ----------------------------------------------- |
+| `<VERSION>` | 输出压缩包的版本号。 |
+| `<RANCHER.YOURDOMAIN.COM>` | 指向负载均衡器的 DNS 名称。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 你的私有镜像仓库的 DNS 名称。 |
+
+```plain
+ helm install rancher ./rancher-<VERSION>.tgz \
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set ingress.tls.source=secret \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ --set useBundledSystemChart=true # 使用打包的 Rancher System Chart
+```
+
+如果你使用的是私有 CA 签名的证书,请在 `--set ingress.tls.source=secret` 后加上 `--set privateCA=true`:
+
+```plain
+ helm install rancher ./rancher-<VERSION>.tgz \
+ --namespace cattle-system \
+ --set hostname=<RANCHER.YOURDOMAIN.COM> \
+ --set rancherImage=<REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher \
+ --set ingress.tls.source=secret \
+ --set privateCA=true \
+ --set systemDefaultRegistry=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ --set useBundledSystemChart=true # 使用打包的 Rancher System Chart
+```
+
+
+安装已完成。
+:::caution
+
+如果你不想发送遥测数据,在首次登录时退出[遥测](../../../../faq/telemetry.md)。如果在离线安装的环境中让这个功能处于 active 状态,socket 可能无法打开。
+
+:::
+
+## 其他资源
+
+以下资源可能对安装 Rancher 有帮助:
+
+- [Rancher Helm Chart 选项](../../installation-references/helm-chart-options.md)
+- [添加 TLS 密文](../../resources/add-tls-secrets.md)
+- [Rancher Kubernetes 安装的故障排除](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md
new file mode 100644
index 00000000000..783555ba158
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md
@@ -0,0 +1,305 @@
+---
+title: '2. 收集镜像并发布到私有仓库'
+---
+
+本文介绍如何配置私有镜像仓库,以便在安装 Rancher 时,Rancher 可以从此私有镜像仓库中拉取所需的镜像。
+
+默认情况下,所有用于[配置 Kubernetes 集群](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)或启动 Rancher 中的工具(如监控,流水线,告警等)的镜像都是从 Docker Hub 中拉取的。在 Rancher 的离线安装中,你需要一个私有仓库,该仓库位于你的 Rancher Server 中某个可访问的位置。然后,你可加载该存有所有镜像的镜像仓库。
+
+无论是使用 Docker 安装 Rancher,还是把 Rancher 安装到 Kubernetes 集群,推送镜像到私有镜像仓库的步骤都是一样的。
+
+你使用 Rancher 配置的下游集群是否有运行 Windows 的节点,决定了本文涉及的步骤。我们提供的推送镜像到私有镜像仓库步骤,是基于假设 Rancher 仅配置运行 Linux 节点的下游 Kubernetes 集群的。但是,如果你计划[在下游 Kubernetes 集群中使用 Windows 节点](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/use-windows-clusters/use-windows-clusters.md),我们有单独的文档来介绍如何为需要的镜像提供支持。
+
+:::note 先决条件:
+
+你必须有一个可用的[私有镜像仓库](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry)。
+
+如果镜像仓库有证书,请参见 [K3s 文档中心](https://rancher.com/docs/k3s/latest/en/installation/private-registry/)了解添加私有镜像仓库的详情。证书和镜像仓库配置文件均需要挂载到 Rancher 容器中。
+
+:::
+
+
+
+
+如果 Rancher Server 用于配置仅有 Linux 节点的集群,请按以下步骤将你的镜像推送到私有镜像仓库。
+
+1. [找到你的 Rancher 版本所需的资源](#1-找到你的-rancher-版本所需的资源)
+2. [收集 cert-manager 镜像](#2-收集-cert-manager-镜像)(除非你使用自己的证书,或在负载均衡器上终止 TLS)
+3. [把镜像保存到你的工作站](#3-将镜像保存到你的工作站中)
+4. [推送镜像到私有镜像仓库](#4-推送镜像到私有镜像仓库)
+
+### 先决条件
+
+这些步骤要求你使用一个 Linux 工作站,该工作站需要可访问互联网和你的私有镜像仓库,且至少有 20GB 的可用磁盘空间。
+
+如果你的主机架构是 ARM64,镜像仓库必须支持 Manifest。请注意,从 2020 年 4 月开始,Amazon Elastic Container Registry 不支持 Manifest。
+
+### 1. 找到你的 Rancher 版本所需的资源
+
+1. 访问 Rancher 的[发布说明](https://github.com/rancher/rancher/releases)页面,找到你需要安装的 Rancher v2.x.x 版本,然后点击 **Assets**。注意不要使用带有 `rc` 或 `Pre-release` 标记的版本,因为这些版本在生产环境中不够稳定。
+
+2. 从你所需版本的 **Assets** 处下载以下文件,这些文件是在离线环境中安装 Rancher 所必须的:
+
+| Release 文件 | 描述 |
+| ---------------- | -------------- |
+| `rancher-images.txt` | 此文件包含安装 Rancher、配置集群和用户 Rancher 工具所需的镜像。 |
+| `rancher-save-images.sh` | 该脚本从 Docker Hub 中拉取在文件 `rancher-images.txt` 中记录的所有镜像,并把这些镜像保存为 `rancher-images.tar.gz`。 |
+| `rancher-load-images.sh` | 该脚本从 `rancher-images.tar.gz` 文件中加载镜像,并把镜像推送到你的私有镜像仓库。 |
+
+### 2. 收集 cert-manager 镜像
+
+:::note
+
+如果你使用自己的证书,或要在外部负载均衡器上终止 TLS,请跳过此步骤。
+
+:::
+
+在 Kubernetes 安装中,如果你使用的是 Rancher 默认的自签名 TLS 证书,则必须将 [`cert-manager`](https://artifacthub.io/packages/helm/cert-manager/cert-manager) 镜像添加到 `rancher-images.txt` 文件中。
+
+1. 获取最新的 `cert-manager` Helm Chart,并解析模板以获取镜像的详情信息:
+
+ :::note
+
+ 由于 cert-manager 的最新改动,你需要升级 cert-manager 版本。如果你需要升级 Rancher 并使用低于 0.12.0 的 cert-manager 版本,请参见[升级文档](../../resources/upgrade-cert-manager.md)。
+
+ :::
+
+ ```plain
+ helm repo add jetstack https://charts.jetstack.io
+ helm repo update
+ helm fetch jetstack/cert-manager
+ helm template ./cert-manager-<VERSION>.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt
+ ```
+
+2. 对镜像列表进行排序和唯一化,以去除重复的镜像源:
+
+ ```plain
+ sort -u rancher-images.txt -o rancher-images.txt
+ ```
+
+### 3. 将镜像保存到你的工作站中
+
+1. 为 `rancher-save-images.sh` 文件添加可执行权限:
+ ```
+ chmod +x rancher-save-images.sh
+ ```
+
+1. 使用 `rancher-images.txt` 镜像列表执行 `rancher-save-images.sh` 脚本,以创建包含所有所需镜像的压缩包:
+ ```plain
+ ./rancher-save-images.sh --image-list ./rancher-images.txt
+ ```
+ **结果**:Docker 开始拉取用于离线安装的镜像。请耐心等待。这个过程需要几分钟。完成时,你的当前目录会输出名为 `rancher-images.tar.gz` 的压缩包。请确认输出文件是否存在。
+
+### 4. 推送镜像到私有镜像仓库
+
+下一步,你将使用脚本将 `rancher-images.tar.gz` 中的镜像移动到你的私有镜像仓库,以方便加载镜像。
+
+`rancher-images.txt` 需要位于工作站中运行 `rancher-load-images.sh` 脚本的同一目录中。`rancher-images.tar.gz` 也需要位于同一目录中。
+
+1. 登录到你的私有镜像仓库:
+```plain
+docker login
+```
+1. 为 `rancher-load-images.sh` 添加可执行权限:
+```
+chmod +x rancher-load-images.sh
+```
+
+1. 使用 `rancher-load-images.sh` 脚本来提取,标记和推送 `rancher-images.txt` 及 `rancher-images.tar.gz` 到你的私有镜像仓库:
+```plain
+./rancher-load-images.sh --image-list ./rancher-images.txt --registry <REGISTRY.YOURDOMAIN.COM:PORT>
+```
+
+
+
+
+如果你的 Rancher Server 将用于配置 Linux 和 Windows 集群,你需要执行不同的步骤,来将 Windows 镜像和 Linux 镜像推送到你的私有镜像仓库。由于 Windows 集群同时包含 Linux 和 Windows 节点,因此推送到私有镜像仓库的 Linux 镜像是 Manifest。
+
+## Windows 步骤
+
+从 Windows Server 工作站中收集和推送 Windows 镜像。
+
+1. 找到你的 Rancher 版本所需的资源
+2. 将镜像保存到你的 Windows Server 工作站
+3. 准备 Docker daemon
+4. 推送镜像到私有镜像仓库
+
+### 先决条件
+
+以下步骤假设你使用 Windows Server 1809 工作站,该工作站能访问网络及你的私有镜像仓库,且至少拥有 50GB 的磁盘空间。
+
+工作站必须安装 Docker 18.02+ 版本以提供 manifest 支持。Manifest 支持是配置 Windows 集群所必须的。
+
+你的镜像仓库必须支持 Manifest。请注意,从 2020 年 4 月开始,Amazon Elastic Container Registry 不支持 Manifest。
+
+
+
+### 1. 找到你的 Rancher 版本所需的资源
+
+1. 访问 Rancher 的[发布说明](https://github.com/rancher/rancher/releases)页面,找到你需要安装的 Rancher v2.x.x 版本。不要下载带有 `rc` 或 `Pre-release` 标记的版本,因为这些版本在生产环境中不够稳定。
+
+2. 从你所需版本的 **Assets** 处下载以下文件:
+
+| Release 文件 | 描述 |
+|----------------------------|------------------|
+| `rancher-windows-images.txt` | 此文件包含配置 Windows 集群所需的 Windows 镜像。 |
+| `rancher-save-images.ps1` | 该脚本从 Docker Hub 中拉取在文件 `rancher-windows-images.txt` 中记录的所有镜像,并把这些镜像保存为 `rancher-windows-images.tar.gz`。 |
+| `rancher-load-images.ps1` | 该脚本从 `rancher-windows-images.tar.gz` 文件中加载镜像,并把镜像推送到你的私有镜像仓库。 |
+
+
+
+### 2. 将镜像保存到你的 Windows Server 工作站
+
+1. 在 `powershell` 中,进入上一步下载的文件所在的目录。
+
+1. 运行 `rancher-save-images.ps1` 以创建包含所有所需镜像的压缩包:
+ ```plain
+ ./rancher-save-images.ps1
+ ```
+
+ **结果**:Docker 开始拉取用于离线安装的镜像。请耐心等待。这个过程需要几分钟。完成时,你的当前目录会输出名为 `rancher-windows-images.tar.gz` 的压缩包。请确认输出文件是否存在。
+
+
+
+### 3. 准备 Docker daemon
+
+将你的私有镜像仓库地址尾附到 Docker daemon (`C:\\ProgramData\\Docker\\config\\daemon.json`) 的 `allow-nondistributable-artifacts` 配置字段中。Windows 镜像的基础镜像是由 `mcr.microsoft.com` 镜像仓库维护的,而 Docker Hub 中缺少 Microsoft 镜像仓库层,且需要将其拉入私有镜像仓库,因此这一步骤是必须的。
+
+```json
+{
+ ...
+ "allow-nondistributable-artifacts": [
+ ...
+ ""
+ ]
+ ...
+}
+```
+
+
+
+### 4. 推送镜像到私有镜像仓库
+
+使用脚本将 `rancher-windows-images.tar.gz` 中的镜像移动到你的私有镜像仓库,以方便加载镜像。
+
+`rancher-windows-images.txt` 需要位于工作站中运行 `rancher-load-images.ps1` 脚本的同一目录中。`rancher-windows-images.tar.gz` 也需要位于同一目录中。
+
+1. 使用 `powershell` 登录到你的私有镜像仓库:
+ ```plain
+ docker login
+ ```
+
+1. 在 `powershell` 中,使用 `rancher-load-images.ps1` 脚本来提取,标记和推送 `rancher-images.tar.gz` 中的镜像到你的私有镜像仓库:
+ ```plain
+ ./rancher-load-images.ps1 --registry <REGISTRY.YOURDOMAIN.COM:PORT>
+ ```
+
+## Linux 步骤
+
+Linux 镜像需要在 Linux 主机上收集和推送,但是你必须先将 Windows 镜像推送到私有镜像仓库,然后再推送 Linux 镜像。由于被推送的 Linux 镜像实际上是支持 Windows 和 Linux 镜像的 manifest,因此涉及的步骤不同于只包含 Linux 节点的集群。
+
+1. 找到你的 Rancher 版本所需的资源
+2. 收集所有需要的镜像
+3. 将镜像保存到你的 Linux 工作站中
+4. 推送镜像到私有镜像仓库
+
+### 先决条件
+
+在将 Linux 镜像推送到私有镜像仓库之前,你必须先把 Windows 镜像推送到私有镜像仓库。如果你已经把 Linux 镜像推送到私有镜像仓库,则需要再次按照说明重新推送,因为它们需要发布支持 Windows 和 Linux 镜像的 manifest。
+
+这些步骤要求你使用一个 Linux 工作站,该工作站需要可访问互联网和你的私有镜像仓库,且至少有 20GB 的可用磁盘空间。
+
+工作站必须安装 Docker 18.02+ 版本以提供 manifest 支持。Manifest 支持是配置 Windows 集群所必须的。
+
+
+
+### 1. 找到你的 Rancher 版本所需的资源
+
+1. 访问 Rancher 的[发布说明](https://github.com/rancher/rancher/releases)页面,找到你需要安装的 Rancher v2.x.x 版本。不要下载带有 `rc` 或 `Pre-release` 标记的版本,因为这些版本在生产环境中不够稳定。点击 **Assets**。
+
+2. 从你所需版本的 **Assets** 处下载以下文件:
+
+| Release 文件 | 描述 |
+|----------------------------| -------------------------- |
+| `rancher-images.txt` | 此文件包含安装 Rancher、配置集群和用户 Rancher 工具所需的镜像。 |
+| `rancher-windows-images.txt` | 此文件包含配置 Windows 集群所需的镜像。 |
+| `rancher-save-images.sh` | 该脚本从 Docker Hub 中拉取在文件 `rancher-images.txt` 中记录的所有镜像,并把这些镜像保存为 `rancher-images.tar.gz`。 |
+| `rancher-load-images.sh` | 该脚本从 `rancher-images.tar.gz` 文件中加载镜像,并把镜像推送到你的私有镜像仓库。 |
+
+
+
+### 2. 收集所有需要的镜像
+
+**在 Kubernetes 安装中,如果你使用的是 Rancher 默认的自签名 TLS 证书**,则必须将 [`cert-manager`](https://artifacthub.io/packages/helm/cert-manager/cert-manager) 镜像添加到 `rancher-images.txt` 文件中。如果你使用自己的证书,则可跳过此步骤。
+
+
+1. 获取最新的 `cert-manager` Helm Chart,并解析模板以获取镜像的详情信息:
+
+ :::note
+
+ 由于 cert-manager 的最新改动,你需要升级 cert-manager 版本。如果你需要升级 Rancher 并使用低于 0.12.0 的 cert-manager 版本,请参见[升级文档](../../resources/upgrade-cert-manager.md)。
+
+ :::
+
+ ```plain
+ helm repo add jetstack https://charts.jetstack.io
+ helm repo update
+ helm fetch jetstack/cert-manager
+ helm template ./cert-manager-<VERSION>.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt
+ ```
+
+2. 对镜像列表进行排序和唯一化,以去除重复的镜像源:
+ ```plain
+ sort -u rancher-images.txt -o rancher-images.txt
+ ```
+
+
+
+### 3. 将镜像保存到你的工作站中
+
+1. 为 `rancher-save-images.sh` 文件添加可执行权限:
+ ```
+ chmod +x rancher-save-images.sh
+ ```
+
+1. 使用 `rancher-images.txt` 镜像列表执行 `rancher-save-images.sh` 脚本,以创建包含所有所需镜像的压缩包:
+ ```plain
+ ./rancher-save-images.sh --image-list ./rancher-images.txt
+ ```
+
+**结果**:Docker 开始拉取用于离线安装的镜像。请耐心等待。这个过程需要几分钟。完成时,你的当前目录会输出名为 `rancher-images.tar.gz` 的压缩包。请确认输出文件是否存在。
+
+
+
+### 4. 推送镜像到私有镜像仓库
+
+使用 `rancher-load-images.sh` 脚本将 `rancher-images.tar.gz` 中的镜像移动到你的私有镜像仓库,以方便加载镜像。
+
+镜像列表,即 `rancher-images.txt` 或 `rancher-windows-images.txt` 需要位于工作站中运行 `rancher-load-images.sh` 脚本的同一目录中。`rancher-images.tar.gz` 也需要位于同一目录中。
+
+1. 登录到你的私有镜像仓库:
+ ```plain
+ docker login
+ ```
+
+1. 为 `rancher-load-images.sh` 添加可执行权限:
+ ```
+ chmod +x rancher-load-images.sh
+ ```
+
+1. 使用 `rancher-load-images.sh` 脚本来提取,标记和推送 `rancher-images.tar.gz` 中的镜像到你的私有镜像仓库:
+
+```plain
+./rancher-load-images.sh --image-list ./rancher-images.txt \
+ --windows-image-list ./rancher-windows-images.txt \
+ --registry <REGISTRY.YOURDOMAIN.COM:PORT>
+```
+
+
+
+
+### [Kubernetes 安装的后续步骤 - 启动 Kubernetes 集群](install-kubernetes.md)
+
+### [Docker 安装的后续步骤 - 安装 Rancher](install-rancher-ha.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
new file mode 100644
index 00000000000..24d8ac7d47c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/other-installation-methods.md
@@ -0,0 +1,19 @@
+---
+title: 其他安装方式
+---
+
+### 离线安装
+
+按照[以下步骤](air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
+
+离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
+
+### Docker 安装
+
+[单节点 Docker 安装](rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
+
+Docker 安装仅用于开发和测试环境。
+
+由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
+
+Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md
new file mode 100644
index 00000000000..bc04713f44a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md
@@ -0,0 +1,249 @@
+---
+title: '2. 安装 Kubernetes'
+---
+
+基础设施配置好后,你可以设置一个 Kubernetes 集群来安装 Rancher。
+
+设置 RKE、RKE2 或 K3s 的步骤如下所示。
+
+为方便起见,将代理的 IP 地址和端口导出到一个环境变量中,并在每个节点上为你当前的 shell 设置 HTTP_PROXY 变量:
+
+```
+export proxy_host="10.0.0.5:8888"
+export HTTP_PROXY=http://${proxy_host}
+export HTTPS_PROXY=http://${proxy_host}
+export NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16
+```
+
+
+
+
+首先在 K3s systemd 服务上配置 HTTP 代理设置,让 K3s 的 containerd 可以通过代理拉取镜像:
+
+```
+cat <<EOF | sudo tee /etc/default/k3s > /dev/null
+HTTP_PROXY=http://${proxy_host}
+HTTPS_PROXY=http://${proxy_host}
+NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
+EOF
+```
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/)。
+
+如需指定 K3s(Kubernetes)版本,在运行 K3s 安装脚本时使用 `INSTALL_K3S_VERSION` 环境变量(例如 `INSTALL_K3S_VERSION="v1.24.10+k3s1"`)。
+
+在第一个节点上,创建一个新集群:
+```
+curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=<VERSION> K3S_TOKEN=<TOKEN> sh -s - server --cluster-init
+```
+
+然后加入其他节点:
+```
+curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=<VERSION> K3S_TOKEN=<TOKEN> sh -s - server --server https://<SERVER>:6443
+```
+
+其中 `<SERVER>` 是 Server 的 IP 或有效 DNS,`<TOKEN>` 是可以在 `/var/lib/rancher/k3s/server/node-token` 中找到的 Server node-token。
+
+有关安装 K3s 的更多信息,请参阅 [K3s 安装文档](https://docs.k3s.io/installation)。
+
+如需查看集群,请运行以下命令:
+
+```
+kubectl cluster-info
+kubectl get pods --all-namespaces
+```
+
+
+
+
+在每个节点上,运行 RKE2 安装脚本。确保你安装的 RKE2 版本受 [Rancher 支持](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/)。
+
+```
+curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=v1.xx sh -
+```
+
+然后,你必须在 RKE2 systemd 服务上配置 HTTP 代理设置,让 RKE2 的 containerd 可以通过代理拉取镜像:
+
+```
+cat <<EOF | sudo tee /etc/default/rke2-server > /dev/null
+HTTP_PROXY=http://${proxy_host}
+HTTPS_PROXY=http://${proxy_host}
+NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local
+EOF
+```
+
+接下来,按照 [RKE2 高可用性文档](https://docs.rke2.io/install/ha)在每个节点上创建 RKE2 配置文件。
+
+之后启动并启用 `rke2-server` 服务:
+
+```
+systemctl enable rke2-server.service
+systemctl start rke2-server.service
+```
+
+有关安装 RKE2 的更多信息,请参阅 [RKE2 文档](https://docs.rke2.io)。
+
+如需查看集群,请运行以下命令:
+
+```
+export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
+alias kubectl=/var/lib/rancher/rke2/bin/kubectl
+kubectl cluster-info
+kubectl get pods --all-namespaces
+```
+
+
+
+
+首先,你需要在所有三个 Linux 节点上安装 Docker 并设置 HTTP 代理。因此,你可以在这三个节点上执行以下步骤。
+
+接下来配置 apt 以在安装包时使用这个代理。如果你使用的不是 Ubuntu,请相应调整步骤。
+
+```
+cat <<EOF | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null
+Acquire::http::Proxy "http://${proxy_host}/";
+Acquire::https::Proxy "http://${proxy_host}/";
+EOF
+
+安装 Docker:
+
+```
+curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh
+```
+
+然后,确保你的当前用户能够在没有 sudo 的情况下访问 Docker Daemon:
+
+```
+sudo usermod -aG docker YOUR_USERNAME
+```
+
+配置 Docker Daemon 使用代理来拉取镜像:
+
+```
+sudo mkdir -p /etc/systemd/system/docker.service.d
+cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null
+[Service]
+Environment="HTTP_PROXY=http://${proxy_host}"
+Environment="HTTPS_PROXY=http://${proxy_host}"
+Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16"
+EOF
+
+要应用配置,请重新启动 Docker Daemon:
+
+```
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```
+
+#### 离线代理
+
+你现在可以在配置的离线集群中配置主机驱动集群,以使用代理进行出站连接。
+
+除了为代理服务器设置默认规则外,你还需要额外添加如下所示的规则,以从代理的 Rancher 环境中配置主机驱动集群。
+
+根据你的设置,将这些规则添加到代理的配置文件中,例如 `/etc/squid/squid.conf`:
+
+```
+acl SSL_ports port 22
+acl SSL_ports port 2376
+
+acl Safe_ports port 22 # ssh
+acl Safe_ports port 2376 # docker port
+```
+
+### 创建 RKE 集群
+
+在能通过 SSH 访问 Linux 节点的主机上,你需要有几个命令行工具,来创建集群并与之交互:
+
+* [RKE CLI binary](https://rancher.com/docs/rke/latest/en/installation/#download-the-rke-binary)
+
+```
+sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64
+sudo chmod +x /usr/local/bin/rke
+```
+
+* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
+```
+curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+```
+
+接下来,创建一个描述 RKE 集群的 YAML 文件。确保节点的 IP 地址和 SSH 用户名是正确的。有关集群 YAML 的详情,请参见 [RKE 官方文档](https://rancher.com/docs/rke/latest/en/example-yamls/)。
+
+```yml
+nodes:
+ - address: 10.0.1.200
+ user: ubuntu
+ role: [controlplane,worker,etcd]
+ - address: 10.0.1.201
+ user: ubuntu
+ role: [controlplane,worker,etcd]
+ - address: 10.0.1.202
+ user: ubuntu
+ role: [controlplane,worker,etcd]
+
+services:
+ etcd:
+ backup_config:
+ interval_hours: 12
+ retention: 6
+```
+
+之后,你可以通过运行以下命令来创建 Kubernetes 集群:
+
+```
+rke up --config rancher-cluster.yaml
+```
+
+RKE 会创建一个名为 `rancher-cluster.rkestate` 的状态文件。如果你需要更新或修改集群配置,或使用备份恢复集群,则需要使用该文件。RKE 还会创建一个 `kube_config_cluster.yaml` 文件,你可以使用该文件在本地使用 kubectl 或 Helm 等工具连接到远端的 Kubernetes 集群。请将这些文件保存在安全的位置,例如版本控制系统中。
+
+如需查看集群,请运行以下命令:
+
+```
+export KUBECONFIG=kube_config_cluster.yaml
+kubectl cluster-info
+kubectl get pods --all-namespaces
+```
+
+你也可以验证你的外部负载均衡器是否工作,DNS 条目是否设置正确。如果你向其中之一发送请求,你会收到来自 Ingress Controller 的 HTTP 404 响应:
+
+```
+$ curl 10.0.1.100
+default backend - 404
+$ curl rancher.example.com
+default backend - 404
+```
+
+### 保存你的文件
+
+:::note 重要提示:
+
+维护、排除问题和升级集群需要用到以下文件,请妥善保管这些文件:
+
+:::
+
+将以下文件的副本保存在安全位置:
+
+- `rancher-cluster.yml`:RKE 集群配置文件。
+- `kube_config_cluster.yml`:集群的 [Kubeconfig 文件](https://rancher.com/docs/rke/latest/en/kubeconfig/)。该文件包含可完全访问集群的凭证。
+- `rancher-cluster.rkestate`:[Kubernetes 集群状态文件](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state)。此文件包含集群的当前状态,包括 RKE 配置和证书。
+
+:::note
+
+后两个文件名中的 `rancher-cluster` 部分取决于你命名 RKE 集群配置文件的方式。
+
+:::
+
+
+
+
+### 故障排除
+
+参见[故障排除](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)页面。
+
+### 后续操作
+[安装 Rancher](install-rancher.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
new file mode 100644
index 00000000000..7ac7caca50c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md
@@ -0,0 +1,106 @@
+---
+title: 3. 安装 Rancher
+---
+
+在前文的操作后,你已经有了一个运行的 RKE 集群,现在可以在其中安装 Rancher 了。出于安全考虑,所有到 Rancher 的流量都必须使用 TLS 加密。在本教程中,你将使用 [cert-manager](https://cert-manager.io/)自动颁发自签名证书。在实际使用情况下,你可使用 Let's Encrypt 或自己的证书。
+
+### 安装 Helm CLI
+
+
+
+在具有 kubeconfig 的主机上安装 [Helm](https://helm.sh/docs/intro/install/) CLI 以访问 Kubernetes 集群:
+
+```
+curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
+chmod +x get_helm.sh
+sudo ./get_helm.sh
+```
+
+### 安装 cert-manager
+
+添加 cert-manager Helm 仓库:
+
+```
+helm repo add jetstack https://charts.jetstack.io
+```
+
+为 cert-manager 创建命名空间:
+
+```
+kubectl create namespace cert-manager
+```
+
+安装 cert-manager 的 CustomResourceDefinitions:
+
+```
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/<VERSION>/cert-manager.crds.yaml
+```
+
+使用 Helm 安装 cert-manager。请注意,cert-manager 还需要你配置代理,以防它需要与 Let's Encrypt 或其他外部证书颁发商进行通信:
+
+:::note
+
+要查看自定义 cert-manager 安装的选项(包括集群使用 PodSecurityPolicies 的情况),请参阅 [cert-manager 文档](https://artifacthub.io/packages/helm/cert-manager/cert-manager#configuration)。
+
+:::
+
+```
+helm upgrade --install cert-manager jetstack/cert-manager \
+ --namespace cert-manager \
+ --set http_proxy=http://${proxy_host} \
+ --set https_proxy=http://${proxy_host} \
+ --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local
+```
+
+等待 cert-manager 完成启动:
+
+```
+kubectl rollout status deployment -n cert-manager cert-manager
+kubectl rollout status deployment -n cert-manager cert-manager-webhook
+```
+
+### 安装 Rancher
+
+接下来,你可以安装 Rancher 了。首先,添加 Helm 仓库:
+
+```
+helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+```
+
+创建命名空间:
+
+```
+kubectl create namespace cattle-system
+```
+
+然后使用 Helm 安装 Rancher:Rancher 也需要你配置代理,以便它可以与外部应用商店通信,或检索 Kubernetes 版本更新元数据:
+
+```
+helm upgrade --install rancher rancher-latest/rancher \
+ --namespace cattle-system \
+ --set hostname=rancher.example.com \
+ --set proxy=http://${proxy_host} \
+ --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local
+```
+
+等待部署完成:
+
+```
+kubectl rollout status deployment -n cattle-system rancher
+```
+
+现在,你可以导航到 `https://rancher.example.com` 并开始使用 Rancher。
+
+:::caution
+
+如果你不想发送遥测数据,在首次登录时退出[遥测](../../../../faq/telemetry.md)。如果在离线安装的环境中让这个功能处于 active 状态,socket 可能无法打开。
+
+:::
+
+### 其他资源
+
+以下资源可能对安装 Rancher 有帮助:
+
+- [Rancher Helm Chart 选项](../../installation-references/helm-chart-options.md)
+- [添加 TLS 密文](../../resources/add-tls-secrets.md)
+- [Rancher Kubernetes 安装的故障排除](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/rancher-behind-an-http-proxy.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/rancher-behind-an-http-proxy.md
new file mode 100644
index 00000000000..1701dba329e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/rancher-behind-an-http-proxy.md
@@ -0,0 +1,13 @@
+---
+title: 使用 HTTP 代理安装 Rancher
+---
+
+很多企业本地运行的服务器或虚拟机不能直接访问互联网,但是出于安全考虑,他们必须通过 HTTP(S) 代理连接到外部服务。本教程将分步介绍如何在这样的环境中进行高可用的 Rancher 安装。
+
+另外,用户也可以在没有任何互联网访问的情况下离线设置 Rancher。详情请参见 [Rancher 官方文档](../air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
+
+## 安装概要
+
+1. [配置基础设施](set-up-infrastructure.md)
+2. [配置 Kubernetes 集群](install-kubernetes.md)
+3. [安装 Rancher](install-rancher.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
new file mode 100644
index 00000000000..f60e31b2c3e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
@@ -0,0 +1,64 @@
+---
+title: '1. 配置基础设施'
+---
+
+在本节中,你将为 Rancher Management Server 配置底层基础设施,并使其通过 HTTP 代理访问互联网。
+
+如需在高可用 RKE 集群中安装 Rancher Management Server,我们建议配置以下基础设施:
+
+- **3 个 Linux 节点**:可以是你的云提供商(例如 Amazon EC2,GCE 或 vSphere)中的虚拟机。
+- **1 个负载均衡器**:用于将前端流量转发到这三个节点中。
+- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
+
+这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
+
+### 为什么使用三个节点?
+
+在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
+
+为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
+
+### 1. 配置 Linux 节点
+
+这些主机将通过 HTTP 代理连接到互联网。
+
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../installation-requirements/installation-requirements.md)的常规要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置负载均衡器
+
+你还需要设置一个负载均衡器,来将流量重定向到三个节点上的 Rancher 副本。这样即使某个节点不可用,也能继续保障与 Rancher Management Server 的通信。
+
+在后续步骤中配置 Kubernetes 时,RKE 工具会部署一个 NGINX Ingress Controller。该 Controller 将侦听 worker 节点的 80 端口和 443 端口,以响应发送给特定主机名的流量。
+
+在安装 Rancher 后(也是在后续步骤中),Rancher 系统将创建一个 Ingress 资源。该 Ingress 通知 NGINX Ingress Controller 监听发往 Rancher 主机名的流量。NGINX Ingress Controller 在收到发往 Rancher 主机名的流量时,会将其转发到集群中正在运行的 Rancher Server Pod。
+
+在你的实现中,你可以考虑是否需要使用 4 层或 7 层的负载均衡器:
+
+- **4 层负载均衡器**:两种选择中较为简单的一种,它将 TCP 流量转发到你的节点中。我们建议使用 4 层负载均衡器,将流量从 TCP/80 端口和 TCP/443 端口转发到 Rancher Management 集群节点上。集群上的 Ingress Controller 会将 HTTP 流量重定向到 HTTPS,并在 TCP/443 端口上终止 SSL/TLS。Ingress Controller 会将流量转发到 Rancher deployment 中 Ingress Pod 的 TCP/80 端口。
+- **7 层负载均衡器**:相对比较复杂,但功能更全面。例如,与 Rancher 本身进行 TLS 终止相反,7 层负载均衡器能够在负载均衡器处处理 TLS 终止。如果你需要集中在基础设施中进行 TLS 终止,7 层负载均衡可能会很适合你。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。如果你选择在 7 层负载均衡器上终止 SSL/TLS 流量,则在安装 Rancher 时(后续步骤)需要使用 `--set tls=external` 选项。详情请参见 [Rancher Helm Chart 选项](../../installation-references/helm-chart-options.md#外部-tls-终止)。
+
+如需获取配置 NGINX 负载均衡器的示例,请参见[本页](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md)。
+
+如需获取如何配置 Amazon ELB 网络负载均衡器的指南,请参见[本页](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md)。
+
+:::note 重要提示:
+
+安装后,请勿将此负载均衡(例如 `local` 集群 Ingress)用于 Rancher 以外的应用。如果此 Ingress 与其他应用共享,在其他应用的 Ingress 配置重新加载后,可能导致 Rancher 出现 websocket 错误。我们建议把 `local` 集群专用给 Rancher,不要在集群内部署其他应用。
+
+:::
+
+### 3. 配置 DNS 记录
+
+配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
+
+根据你的环境,DNS 记录可以是指向负载均衡器 IP 的 A 记录,也可以是指向负载均衡器主机名的 CNAME。无论是哪种情况,请确保该记录是你要 Rancher 进行响应的主机名。
+
+在安装 Rancher 时(后续步骤),你需要指定此主机名。请知悉,此主机名无法修改。请确保你设置的主机名是你想要的。
+
+有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
+
+
+### 后续操作
+[配置 Kubernetes 集群](install-kubernetes.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
new file mode 100644
index 00000000000..34ee707431b
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
@@ -0,0 +1,92 @@
+---
+title: 证书故障排除
+---
+
+
+
+### 如何确定我的证书格式是否为 PEM?
+
+你可以通过以下特征识别 PEM 格式:
+
+- 文件开始的标头:
+ ```
+ -----BEGIN CERTIFICATE-----
+ ```
+- 表头后跟一长串字符。
+- 文件结束的页脚:
+ ```
+ -----END CERTIFICATE-----
+ ```
+
+PEM 证书示例:
+
+```
+-----BEGIN CERTIFICATE-----
+MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV
+... more lines
+VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
+-----END CERTIFICATE-----
+```
+
+PEM 证书密钥示例:
+
+```
+-----BEGIN RSA PRIVATE KEY-----
+MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV
+... more lines
+VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
+-----END RSA PRIVATE KEY-----
+```
+
+如果你的密钥与以下示例类似,请参见[将 PKCS8 证书密钥转换为 PKCS1](#将-pkcs8-证书密钥转换为-pkcs1)。
+
+```
+-----BEGIN PRIVATE KEY-----
+MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV
+... more lines
+VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
+-----END PRIVATE KEY-----
+```
+
+### 将 PKCS8 证书密钥转换为 PKCS1
+
+如果你使用的是 PKCS8 证书密钥文件,Rancher 将打印以下日志:
+
+```
+ListenConfigController cli-config [listener] failed with : failed to read private key: asn1: structure error: tags don't match (2 vs {class:0 tag:16 length:13 isCompound:true})
+```
+
+为了能正常工作,你需要运行以下命令,将密钥从 PKCS8 转换为 PKCS1:
+
+```
+openssl rsa -in key.pem -out convertedkey.pem
+```
+
+你可使用 `convertedkey.pem` 作为 Rancher 证书密钥文件。
+
+### 添加中间证书的顺序是什么?
+
+添加证书的顺序如下:
+
+```
+-----BEGIN CERTIFICATE-----
+%YOUR_CERTIFICATE%
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+%YOUR_INTERMEDIATE_CERTIFICATE%
+-----END CERTIFICATE-----
+```
+
+### 如何验证我的证书链?
+
+你可使用 `openssl` 二进制文件来验证证书链。如果命令的输出以 `Verify return code: 0 (ok)` 结尾(参见以下示例),你的证书链是有效的。`ca.pem` 文件必须与你添加到 `rancher/rancher` 容器中的文件一致。
+
+如果你使用由可信的 CA 签发的证书,可省略 `-CAfile` 参数。
+
+命令:
+
+```
+openssl s_client -CAfile ca.pem -connect rancher.yourdomain.com:443
+...
+ Verify return code: 0 (ok)
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
similarity index 81%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-on-a-single-node-with-docker.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
index b8875d47d51..c2a63b86a5e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/rancher-on-a-single-node-with-docker.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md
@@ -3,19 +3,21 @@ title: 使用 Docker 将 Rancher 安装到单个节点中
description: 在开发和测试环境中,你可以使用 Docker 安装。在单个 Linux 主机上安装 Docker,然后使用一个 Docker 容器部署 Rancher。
---
+
+
Rancher 可以通过运行单个 Docker 容器进行安装。
在这种安装方案中,你需要将 Docker 安装到单个 Linux 主机,然后使用单个 Docker 容器将 Rancher 部署到主机中。
:::note 想要使用外部负载均衡器?
-请参阅[使用外部负载均衡器的 Docker 安装](../how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md)。
+请参阅[使用外部负载均衡器的 Docker 安装](../../../../how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md)。
:::
Rancher 的 Docker 安装仅推荐用于开发和测试环境中。Rancher 版本决定了能否将 Rancher 迁移到高可用集群。
-Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
+Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用 Kubernetes 集群上。详情请参见[把 Rancher 迁移到新集群](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
## Rancher 特权访问
@@ -23,11 +25,11 @@ Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用
## 操作系统,Docker,硬件和网络要求
-请确保你的节点满足常规的[安装要求](installation-requirements.md)。
+请确保你的节点满足常规的[安装要求](../../installation-requirements/installation-requirements.md)。
## 1. 配置 Linux 主机
-按照[要求](installation-requirements.md)配置一个 Linux 主机,用于运行 Rancher Server。
+按照[要求](../../installation-requirements/installation-requirements.md)配置一个 Linux 主机,用于运行 Rancher Server。
## 2. 选择一个 SSL 选项并安装 Rancher
@@ -35,10 +37,10 @@ Rancher backup operator 可将 Rancher 从单个 Docker 容器迁移到高可用
:::tip 你是否需要:
-- 使用代理。参见 [HTTP 代理配置](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md)。
-- 配置自定义 CA 根证书以访问服务。参见[自定义 CA 根证书](../reference-guides/single-node-rancher-in-docker/advanced-options.md#自定义-ca-证书)。
-- 完成离线安装。参见 [离线:Docker 安装](air-gapped-helm-cli-install.md)。
-- 记录所有 Rancher API 的事务。参加 [API 审计](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)。
+- 使用代理。参见 [HTTP 代理配置](../../../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md)。
+- 配置自定义 CA 根证书以访问服务。参见[自定义 CA 根证书](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#自定义-ca-证书)。
+- 完成离线安装。参见 [离线:Docker 安装](../air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)。
+- 记录所有 Rancher API 的事务。参加 [API 审计](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)。
:::
@@ -195,13 +197,13 @@ docker run -d --restart=unless-stopped \
- 持久化数据
- 在同一个节点中运行 `rancher/rancher` 和 `rancher/rancher-agent`
-详情请参见[本页](../reference-guides/single-node-rancher-in-docker/advanced-options.md)。
+详情请参见[本页](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md)。
## 故障排除
-如需了解常见问题及故障排除提示,请参见[本页](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+如需了解常见问题及故障排除提示,请参见[本页](certificate-troubleshooting.md)。
## 后续操作
-- **推荐**:检查单节点[备份](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)和[恢复](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)。你可能暂时没有需要备份的数据,但是我们建议你在常规使用 Rancher 后创建备份。
-- 创建 Kubernetes 集群:[配置 Kubernetes 集群](kubernetes-clusters-in-rancher-setup.md)。
+- **推荐**:检查单节点[备份](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)和[恢复](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)。你可能暂时没有需要备份的数据,但是我们建议你在常规使用 Rancher 后创建备份。
+- 创建 Kubernetes 集群:[配置 Kubernetes 集群](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md
new file mode 100644
index 00000000000..762ec2bd1a5
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md
@@ -0,0 +1,93 @@
+---
+title: 回滚 Docker 安装的 Rancher
+---
+
+
+
+如果 Rancher 升级没有成功完成,你需要回滚到你在 [Docker 升级](upgrade-docker-installed-rancher.md)之前使用的 Rancher 设置。回滚可以恢复:
+
+- 先前版本的 Rancher。
+- 升级前创建的数据备份。
+
+## 在你开始前
+
+在回滚到先前 Rancher 版本的过程中,你将输入一系列命令。请按照你环境的实际情况替换占位符。占位符用尖括号和大写字母(如 `<PRIOR_RANCHER_VERSION>`)表示。以下是带有占位符的命令示例:
+
+```
+docker pull rancher/rancher:<PRIOR_RANCHER_VERSION>
+```
+
+在此命令中,`<PRIOR_RANCHER_VERSION>` 是升级失败之前运行的 Rancher 版本,如 `v2.0.5`。
+
+请交叉参考下方的图片和表格,了解获取此占位符数据的方法。在开始以下步骤之前,请先记下或复制此信息。
+
+终端 docker ps 命令,显示如何找到 `<PRIOR_RANCHER_VERSION>` 和 `<RANCHER_CONTAINER_NAME>`
+
+| 占位符 | 示例 | 描述 |
+| -------------------------- | -------------------------- | ------------------------------------------------------- |
+| `<PRIOR_RANCHER_VERSION>` | `v2.0.5` | 升级前使用的 rancher/rancher 镜像。 |
+| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | 你的 Rancher 容器的名称。 |
+| `<RANCHER_VERSION>` | `v2.0.5` | 备份对应的 Rancher 版本。 |
+| `<DATE>` | `9-27-18` | 数据容器或备份的创建日期。 |
+
+
+可以通过远程连接登录到 Rancher Server 所在的主机并输入命令 `docker ps` 以查看正在运行的容器,从而获得 `<PRIOR_RANCHER_VERSION>` 和 `<RANCHER_CONTAINER_NAME>`。你还可以运行 `docker ps -a` 命令查看停止了的容器。在创建备份期间,你随时可以运行这些命令来获得帮助。
+
+## 回滚 Rancher
+
+如果你在升级 Rancher 时遇到问题,你可拉取先前使用的镜像并恢复在升级前所做的备份,从而将 Rancher 回滚到之前的正常工作状态。
+
+:::danger
+
+回滚到先前的 Rancher 版本会破坏你在升级后对 Rancher 做出的所有更改。丢失的数据可能无法恢复。
+
+:::
+
+1. 使用远程终端连接,登录到运行 Rancher Server 的节点。
+
+1. 拉取升级前运行的 Rancher 版本。把 `<PRIOR_RANCHER_VERSION>` 替换为该版本。
+
+ 例如,如果升级之前运行的是 Rancher v2.0.5,请拉取 v2.0.5。
+
+ ```
+ docker pull rancher/rancher:<PRIOR_RANCHER_VERSION>
+ ```
+
+1. 停止当前运行 Rancher Server 的容器。将 `<RANCHER_CONTAINER_NAME>` 替换为你的 Rancher 容器的名称:
+
+ ```
+ docker stop <RANCHER_CONTAINER_NAME>
+ ```
+ 你可输入 `docker ps` 获取 Rancher 容器的名称。
+
+1. 将你在 [Docker 升级](upgrade-docker-installed-rancher.md)时创建的备份压缩包移动到 Rancher Server。切换到你将其移动到的目录。输入 `dir` 以确认它在该位置。
+
+ 如果你遵循了我们在 [Docker 升级](upgrade-docker-installed-rancher.md)中推荐的命名方式,它的名称会与 `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz` 类似。
+
+1. 替换占位符来运行以下命令,将 `rancher-data` 容器中的数据替换为备份压缩包中的数据。不要忘记关闭引号。
+
+ ```
+ docker run --volumes-from rancher-data \
+ -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \
+ && tar zxvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz"
+ ```
+
+1. 将 `<PRIOR_RANCHER_VERSION>` 占位符指向数据容器,启动一个新的 Rancher Server 容器。
+ ```
+ docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ --privileged \
+ rancher/rancher:<PRIOR_RANCHER_VERSION>
+ ```
+ 特权访问是[必须](rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+ :::danger
+
+ 启动回滚后,即使回滚耗时比预期长,也 **_不要_** 停止回滚。如果你停止回滚,可能会导致之后的升级中出现数据库错误。
+
+ :::
+
+1. 等待片刻,然后在浏览器中打开 Rancher。确认回滚成功并且你的数据已恢复。
+
+**结果**:Rancher 回滚到升级前的版本和数据状态。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
new file mode 100644
index 00000000000..7dbef6d2cab
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
@@ -0,0 +1,393 @@
+---
+title: 升级 Docker 安装的 Rancher
+---
+
+本文介绍如何升级通过 Docker 安装的 Rancher Server。
+
+:::caution
+
+**生产环境不支持 Docker 安装**。这些说明仅适用于测试和开发。如果你已经在生产环境中部署了 Docker 安装并且需要升级到新的 Rancher 版本,我们建议你在升级前先[迁移到 Helm Chart 安装](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)。
+
+:::
+
+## 先决条件
+
+- 在 Rancher 文档中**检查[已知升级问题](../../install-upgrade-on-a-kubernetes-cluster/upgrades.md#已知升级问题)**,了解升级 Rancher 时最需要注意的问题。你可以在 [GitHub](https://github.com/rancher/rancher/releases) 发布说明以及 [Rancher 论坛](https://forums.rancher.com/c/announcements/12)中找到每个 Rancher 版本的已知问题。不支持升级或升级到 [rancher-alpha 仓库](../../resources/choose-a-rancher-version.md#helm-chart-仓库)中的任何 Chart。
+- **[仅适用于离线安装](../air-gapped-helm-cli-install/air-gapped-helm-cli-install.md):为新的 Rancher Server 版本收集和推送镜像**。按照指南为你想要升级的目标 Rancher 版本[推送镜像到私有镜像仓库](../air-gapped-helm-cli-install/publish-images.md)。
+
+## 占位符
+
+在升级过程中,你将输入一系列命令。请按照你环境的实际情况替换占位符。占位符用尖括号和大写字母(如 `<RANCHER_CONTAINER_NAME>`)表示。
+
+以下是带有占位符的命令**示例**:
+
+```
+docker stop <RANCHER_CONTAINER_NAME>
+```
+
+在此命令中,`<RANCHER_CONTAINER_NAME>` 是你的 Rancher 容器的名称。
+
+## 获取升级命令的数据
+
+要获取替换占位符的数据,请运行:
+
+```
+docker ps
+```
+
+在开始升级之前记下或复制此信息。
+
+终端 docker ps 命令,显示如何找到 `<RANCHER_CONTAINER_TAG>` 和 `<RANCHER_CONTAINER_NAME>`
+
+
+
+| 占位符 | 示例 | 描述 |
+| -------------------------- | -------------------------- | --------------------------------------------------------- |
+| `<RANCHER_CONTAINER_TAG>` | `v2.1.3` | 首次安装拉取的 rancher/rancher 镜像。 |
+| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | 你的 Rancher 容器的名称。 |
+| `<RANCHER_VERSION>` | `v2.1.3` | 你为其创建备份的 Rancher 版本。 |
+| `<DATE>` | `2018-12-19` | 数据容器或备份的创建日期。 |
+
+
+可以通过远程连接登录到 Rancher Server 所在的主机并输入命令 `docker ps` 以查看正在运行的容器,从而获得 `<RANCHER_CONTAINER_TAG>` 和 `<RANCHER_CONTAINER_NAME>`。你还可以运行 `docker ps -a` 命令查看停止了的容器。在创建备份期间,你随时可以运行这些命令来获得帮助。
+
+## 升级
+
+在升级期间,你可以为当前 Rancher 容器创建数据的副本及备份,以确保可以在升级出现问题时可以进行回滚。然后,你可使用现有数据将新版本的 Rancher 部署到新容器中。
+### 1. 创建 Rancher Server 容器的数据副本
+
+1. 使用远程终端连接,登录到运行 Rancher Server 的节点。
+
+1. 停止正在运行 Rancher Server 的容器。将 `<RANCHER_CONTAINER_NAME>` 替换为你的 Rancher 容器的名称:
+
+ ```
+ docker stop <RANCHER_CONTAINER_NAME>
+ ```
+
+1. 运行以下命令,从刚才停止的 Rancher 容器创建一个数据容器。请替换命令中的占位符:
+
+ ```
+ docker create --volumes-from <RANCHER_CONTAINER_NAME> --name rancher-data rancher/rancher:<RANCHER_CONTAINER_TAG>
+ ```
+
+### 2. 创建备份压缩包
+
+1. 从你刚刚创建的数据容器(rancher-data)中,创建一个备份 tar 包(`rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`)。
+
+ 如果升级期间出现问题,此压缩包可以用作回滚点。替换占位符来运行以下命令。
+ ```
+ docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz /var/lib/rancher
+ ```
+
+ **步骤结果**:你输入此命令时,会运行一系列命令。
+
+1. 输入 `ls` 命令,确认备份压缩包已创建成功。压缩包的名称格式类似 `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`。
+
+ ```
+ [rancher@ip-10-0-0-50 ~]$ ls
+ rancher-data-backup-v2.1.3-20181219.tar.gz
+ ```
+
+1. 将备份压缩包移动到 Rancher Server 外的安全位置。
+
+### 3. 拉取新的 Docker 镜像
+
+拉取你需要升级到的 Rancher 版本镜像。
+
+| 占位符 | 描述 |
+------------|-------------
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+```
+docker pull rancher/rancher:<RANCHER_VERSION>
+```
+
+### 4. 启动新的 Rancher Server 容器
+
+使用 `rancher-data` 容器中的数据启动一个新的 Rancher Server 容器。记住要传入启动原始容器时使用的所有环境变量。
+
+:::danger
+
+启动升级后,即使升级耗时比预期长,也 **_不要_** 停止升级。如果你停止升级,可能会导致之后的升级中出现数据库迁移错误。
+
+:::
+
+如果你使用代理,请参见 [HTTP 代理配置](../../../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md)。
+
+如果你配置了自定义 CA 根证书来访问服务,请参见[自定义 CA 根证书](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#自定义-ca-证书)。
+
+如果你要记录所有 Rancher API 的事务,请参见 [API 审计](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)。
+
+如需查看启动新 Rancher Server 容器时使用的命令,从以下的选项中进行选择:
+
+- Docker 升级
+- 离线安装的 Docker 升级
+
+
+
+
+选择你安装 Rancher Server 时用的选项
+
+#### 选项 A:使用 Rancher 默认的自签名证书
+
+
+ 单击展开
+
+如果你使用 Rancher 生成的自签名证书,则将 `--volumes-from rancher-data` 添加到你启动原始 Rancher Server 容器的命令中。
+
+| 占位符 | 描述 |
+------------|-------------
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+```
+docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ --privileged \
+ rancher/rancher:<RANCHER_VERSION>
+```
+
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+
+#### 选项 B:使用你自己的证书 - 自签名
+
+
+单击展开
+
+如果你选择使用自己的自签名证书,则在启动原始 Rancher Server 容器的命令中添加 `--volumes-from rancher-data`。此外,你需要能够访问你原始安装时使用的证书。
+
+:::note 证书要求提示:
+
+证书文件的格式必须是 PEM。在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。
+
+:::
+
+| 占位符 | 描述 |
+------------|-------------
+| `<CERT_DIRECTORY>` | 包含证书文件的目录的路径。 |
+| `<FULL_CHAIN.pem>` | 完整证书链的路径。 |
+| `<PRIVATE_KEY.pem>` | 证书私钥的路径。 |
+| `<CA_CERTS.pem>` | CA 证书的路径。 |
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+```
+docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
+ -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
+ -v /<CERT_DIRECTORY>/<CA_CERTS.pem>:/etc/rancher/ssl/cacerts.pem \
+ --privileged \
+ rancher/rancher:<RANCHER_VERSION>
+```
+
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+
+#### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
+
+
+ 单击展开
+
+如果你选择使用可信 CA 签名的证书,则在启动原始 Rancher Server 容器的命令中添加 `--volumes-from rancher-data`。此外,你需要能够访问你原始安装时使用的证书。注意要使用 `--no-cacerts` 作为容器的参数,以禁用 Rancher 生成的默认 CA 证书。
+
+:::note 证书要求提示:
+
+证书文件的格式必须是 PEM。在你的证书文件中,包括可信 CA 提供的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](certificate-troubleshooting.md)。
+
+:::
+
+| 占位符 | 描述 |
+------------|-------------
+| `<CERT_DIRECTORY>` | 包含证书文件的目录的路径。 |
+| `<FULL_CHAIN.pem>` | 完整证书链的路径。 |
+| `<PRIVATE_KEY.pem>` | 证书私钥的路径。 |
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+```
+docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
+ -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
+ --privileged \
+ rancher/rancher:<RANCHER_VERSION> \
+ --no-cacerts
+```
+
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+#### 选项 D:Let's Encrypt 证书
+
+
+ 单击展开
+
+:::caution
+
+Let's Encrypt 对新证书请求有频率限制。因此,请限制创建或销毁容器的频率。详情请参见 [Let's Encrypt 官方文档 - 频率限制](https://letsencrypt.org/docs/rate-limits/)。
+
+:::
+
+如果你选择使用 [Let's Encrypt](https://letsencrypt.org/) 证书,则在启动原始 Rancher Server 容器的命令中添加 `--volumes-from rancher-data`,并且提供最初安装 Rancher 时使用的域名。
+
+:::note 证书要求提示:
+
+- 在 DNS 中创建一条记录,将 Linux 主机 IP 地址绑定到要用于访问 Rancher 的主机名(例如,`rancher.mydomain.com`)。
+- 在 Linux 主机上打开 `TCP/80` 端口。Let's Encrypt 的 HTTP-01 质询可以来自任何源 IP 地址,因此端口 `TCP/80` 必须开放给所有 IP 地址。
+
+:::
+
+| 占位符 | 描述 |
+------------|-------------
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+| `<YOUR.DNS.NAME>` | 你最初使用的域名 |
+
+```
+docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ --privileged \
+ rancher/rancher:<RANCHER_VERSION> \
+ --acme-domain <YOUR.DNS.NAME>
+```
+
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+
+
+
+
+出于安全考虑,使用 Rancher 时请使用 SSL(Secure Sockets Layer)。SSL 保护所有 Rancher 网络通信(如登录和与集群交互)的安全。
+
+启动新的 Rancher Server 容器时,从以下的选项中进行选择:
+
+#### 选项 A:使用 Rancher 默认的自签名证书
+
+
+ 单击展开
+
+如果你使用 Rancher 生成的自签名证书,则将 `--volumes-from rancher-data` 添加到你启动原始 Rancher Server 容器的命令中。
+
+| 占位符 | 描述 |
+------------|-------------
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 私有镜像仓库的 URL 和端口。 |
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+```
+ docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ -e CATTLE_SYSTEM_CATALOG=bundled \ # 使用打包的 Rancher System Chart
+ --privileged \
+ <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION>
+```
+
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+#### 选项 B:使用你自己的证书 - 自签名
+
+
+ 单击展开
+
+如果你选择使用自己的自签名证书,则在启动原始 Rancher Server 容器的命令中添加 `--volumes-from rancher-data`。此外,你需要能够访问你原始安装时使用的证书。
+
+:::note 证书要求提示:
+
+证书文件的格式必须是 PEM。在你的证书文件中,包括链中的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](certificate-troubleshooting.md)。
+
+:::
+
+| 占位符 | 描述 |
+------------|-------------
+| `<CERT_DIRECTORY>` | 包含证书文件的目录的路径。 |
+| `<FULL_CHAIN.pem>` | 完整证书链的路径。 |
+| `<PRIVATE_KEY.pem>` | 证书私钥的路径。 |
+| `<CA_CERTS.pem>` | CA 证书的路径。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 私有镜像仓库的 URL 和端口。 |
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+```
+docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
+ -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
+ -v /<CERT_DIRECTORY>/<CA_CERTS.pem>:/etc/rancher/ssl/cacerts.pem \
+ -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ -e CATTLE_SYSTEM_CATALOG=bundled \ # 使用打包的 Rancher System Chart
+ --privileged \
+ <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION>
+```
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+#### 选项 C:使用你自己的证书 - 可信 CA 签名的证书
+
+
+ 单击展开
+
+如果你选择使用可信 CA 签名的证书,则在启动原始 Rancher Server 容器的命令中添加 `--volumes-from rancher-data`。此外,你需要能够访问你原始安装时使用的证书。
+
+:::note 证书要求提示:
+
+证书文件的格式必须是 PEM。在你的证书文件中,包括可信 CA 提供的所有中间证书。你需要对你的证书进行排序,把你的证书放在最前面,后面跟着中间证书。如需查看示例,请参见[证书故障排除](certificate-troubleshooting.md)。
+
+:::
+
+| 占位符 | 描述 |
+------------|-------------
+| `<CERT_DIRECTORY>` | 包含证书文件的目录的路径。 |
+| `<FULL_CHAIN.pem>` | 完整证书链的路径。 |
+| `<PRIVATE_KEY.pem>` | 证书私钥的路径。 |
+| `<REGISTRY.YOURDOMAIN.COM:PORT>` | 私有镜像仓库的 URL 和端口。 |
+| `<RANCHER_VERSION>` | 你想要升级到的 [Rancher 版本](../../installation-references/helm-chart-options.md)的版本标签。 |
+
+:::note
+
+使用 `--no-cacerts` 作为容器的参数,以禁用 Rancher 生成的默认 CA 证书。
+
+:::
+
+```
+docker run -d --volumes-from rancher-data \
+ --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
+ -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
+ -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ -e CATTLE_SYSTEM_CATALOG=bundled \ # 使用打包的 Rancher System Chart
+ --privileged \
+ <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION> \
+ --no-cacerts
+```
+特权访问是[必须](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)的。
+
+
+
+
+
+**结果**:你已升级 Rancher。已升级 Server 中的数据将保存在 `rancher-data` 容器中,用于将来的升级。
+
+### 5. 验证升级
+
+登录到 Rancher。通过检查浏览器左下角的版本号,确认升级是否成功。
+
+:::note 升级后下游集群出现网络问题?
+
+请参见[恢复集群网络](/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md)。
+
+:::
+
+### 6. 清理旧的 Rancher Server 容器
+
+移除旧的 Rancher Server 容器。如果你仅停止了旧的 Rancher Server 容器,但没有移除它,该容器还可能在服务器下次重启后重新启动。
+
+## 回滚
+
+如果升级没有成功完成,你可以将 Rancher Server 及其数据回滚到上次的健康状态。详情请参见 [Docker 回滚](roll-back-docker-installed-rancher.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/add-tls-secrets.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/add-tls-secrets.md
new file mode 100644
index 00000000000..87f320d0dca
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/add-tls-secrets.md
@@ -0,0 +1,45 @@
+---
+title: 添加 TLS 密文
+---
+
+我们使用证书和密钥将 `cattle-system` 命名空间中的 `tls-rancher-ingress` 密文配置好后,Kubernetes 会为 Rancher 创建对象和服务。
+
+将服务器证书和所需的所有中间证书合并到名为 `tls.crt`的文件中。将证书密钥复制到名为 `tls.key` 的文件中。
+
+例如,[acme.sh](https://acme.sh) 在 `fullchain.cer` 文件中提供服务器证书和 CA 链。
+请将 `fullchain.cer` 命名为 `tls.crt`,将证书密钥文件命名为 `tls.key`。
+
+使用 `kubectl` 创建 `tls` 类型的密文。
+
+```
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+ --cert=tls.crt \
+ --key=tls.key
+```
+
+:::note
+
+如需替换证书,你可以运行 `kubectl -n cattle-system delete secret tls-rancher-ingress` 来删除 `tls-rancher-ingress` 密文,然后运行上方命令来添加新的密文。如果你使用的是私有 CA 签名证书,仅当新证书与当前证书是由同一个 CA 签发的,才可以替换。
+
+:::
+
+## 使用私有 CA 签名证书
+
+如果你使用的是私有 CA,Rancher 需要私有 CA 的根证书或证书链的副本,Rancher Agent 使用它来校验与 Server 的连接。
+
+创建一个名为 `cacerts.pem` 的文件,该文件仅包含私有 CA 的根 CA 证书或证书链,并使用 `kubectl` 在 `cattle-system` 命名空间中创建 `tls-ca` Secret。
+
+```
+kubectl -n cattle-system create secret generic tls-ca \
+ --from-file=cacerts.pem=./cacerts.pem
+```
+
+:::note
+
+Rancher 启动时会检索配置的 `tls-ca` 密文。如果 Rancher 在运行中,更新的 CA 会在新的 Rancher Pod 启动后生效。
+
+:::
+
+## 更新私有 CA 证书
+
+按照[步骤](update-rancher-certificate.md)更新 [Rancher 高可用 Kubernetes 安装](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)中的 Ingress,或从默认自签名证书切换到自定义证书。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/bootstrap-password.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/bootstrap-password.md
new file mode 100644
index 00000000000..249b14115a0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/bootstrap-password.md
@@ -0,0 +1,27 @@
+---
+title: 引导密码
+---
+
+Rancher 首次启动时,会为第一个管理员用户随机生成一个密码。当管理员首次登录 Rancher 时,用于获取引导密码(Bootstrap)的命令会在 UI 上显示。管理员需要运行命令并使用引导密码登录。然后 Rancher 会让管理员重置密码。
+
+如果你在安装过程中没有使用变量来设置引导密码,则会随机生成引导密码。如需了解使用变量设置引导密码的详情,请参见下文。
+
+### 在 Helm 安装中指定引导密码
+
+Helm 安装的情况下,你可以使用 `.Values.bootstrapPassword` 在 Helm Chart 值中指定引导密码变量。
+
+密码将存储在 Kubernetes 密文中。安装 Rancher 后,如何使用 kubectl 获取密码的说明将会在 UI 中显示:
+
+```
+kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}'
+```
+
+### 在 Docker 安装中指定引导密码
+
+如果 Rancher 是使用 Docker 安装的,你可以通过在 Docker 安装命令中传递 `-e CATTLE_BOOTSTRAP_PASSWORD=password` 来指定引导密码。
+
+密码将存储在 Docker 容器日志中。安装 Rancher 后,如何使用 Docker 容器 ID 获取密码的说明将会在 UI 中显示:
+
+```
+docker logs container-id 2>&1 | grep "Bootstrap Password:"
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
new file mode 100644
index 00000000000..7ddfcef2d8e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md
@@ -0,0 +1,118 @@
+---
+title: 选择 Rancher 版本
+---
+
+本节介绍如何选择 Rancher 版本。
+
+在我们推荐用于生产环境的 Rancher 高可用安装中,Rancher Server 是通过 Kubernetes 集群上的 **Helm Chart** 安装的。请参见 [Helm 版本要求](helm-version-requirements.md)选择 Helm 版本来安装 Rancher。
+
+如果你在开发和测试中使用 Docker 来安装 Rancher,你需要把 Rancher 作为一个 **Docker 镜像**来安装。
+
+
+
+
+如果 Rancher Server 是[安装在 Kubernetes 集群上](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)的,Rancher Server 的安装,升级和回滚中,都是使用 Kubernetes 集群上的 Helm Chart 来安装 Rancher 的。因此,在准备安装或升级 Rancher 高可用时,必须添加包含用于安装 Rancher 的 Chart 的 Helm Chart 仓库。
+
+请参见 [Helm 版本要求](helm-version-requirements.md)选择 Helm 版本来安装 Rancher。
+
+### Helm Chart 仓库
+
+Rancher 提供几个可选的 Helm Chart 仓库供你选择。最新版或稳定版的 Helm Chart 仓库与用于 Docker 安装中的 Docker 标签对应。因此,`rancher-latest` 仓库包含所有标记为 `rancher/rancher:latest` 的 Rancher 版本 Chart。当 Rancher 版本升级到 `rancher/rancher:stable`,它会被添加到 `rancher-stable` 仓库中。
+
+| 类型 | 添加仓库的命令 | 仓库描述 |
+| -------------- | ------------ | ----------------- |
+| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | 添加最新版本的 Rancher 的 Helm Chart 仓库。建议使用此仓库来测试新版本的 Rancher。 |
+| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | 添加较旧的,稳定的版本的 Rancher 的 Helm Chart 仓库。建议在生产环境中使用此仓库。 |
+| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | 添加 alpha 版本的 Rancher 的 Helm Chart 仓库,以预览即将发布的版本。不建议在生产环境中使用这些版本。无论是什么仓库,均不支持 _升级_ 或 _升级到_ rancher-alpha 仓库中的任何 Chart。 |
+
+了解何时选择这些仓库,请参见[切换到不同 Helm Chart 仓库](#切换到不同-helm-chart-仓库)。
+
+:::note
+
+`rancher-stable` 仓库中的所有 Chart 都与 `stable` 标记的 Rancher 版本对应。
+
+:::
+
+### Helm Chart 版本
+
+Rancher Helm Chart 版本与 Rancher 版本(即 `appVersion`)对应。添加仓库后,你可以运行以下命令搜索可用版本:
+ `helm search repo --versions`
+
+如果你有多个仓库,你可指定仓库名称,即:`helm search repo rancher-stable/rancher --versions`
+详情请访问 https://helm.sh/docs/helm/helm_search_repo/
+
+要获取所选仓库的指定版本,参见如下示例指定 `--version` 参数:
+ `helm fetch rancher-stable/rancher --version=2.4.8`
+
+### 切换到不同 Helm Chart 仓库
+
+安装 Rancher 后,如果想修改安装 Rancher 的 Helm Chart 仓库,按照以下步骤操作。
+
+:::note
+
+由于 rancher-alpha 仓库只包含 alpha 版本 Chart,因此不支持从 rancher alpha 仓库切换到 rancher-stable 或 rancher-latest 仓库以进行升级。
+
+:::
+
+- Latest:建议用于试用最新功能
+ ```
+ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+ ```
+- Stable:建议用于生产环境
+ ```
+ helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+ ```
+- Alpha:即将发布的实验性预览。
+ ```
+ helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha
+ ```
+ 注意:不支持升级到 Alpha 版、从 Alpha 版升级或在 Alpha 版之间升级。
+
+1. 列出当前 Helm Chart 仓库。
+
+ ```plain
+ helm repo list
+
+ NAME URL
+ stable https://charts.helm.sh/stable
+ rancher-<CHART_REPO> https://releases.rancher.com/server-charts/<CHART_REPO>
+ ```
+
+2. 删除包含安装 Rancher 时用的 Chart 的 Helm Chart 仓库。是 `rancher-stable` 或 `rancher-latest` 取决于你初始安装时的选择。
+
+ ```plain
+ helm repo remove rancher-<CHART_REPO>
+ ```
+
+3. 添加你要用于安装 Rancher 的 Helm Chart 仓库。
+
+ ```plain
+ helm repo add rancher-<CHART_REPO> https://releases.rancher.com/server-charts/<CHART_REPO>
+ ```
+
+4. 按照以下步骤,从新的 Helm Chart 仓库[升级 Rancher](../install-upgrade-on-a-kubernetes-cluster/upgrades.md)。
+
+
+
+
+在执行 [Docker 安装](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md)、升级或回滚时,你可以使用 _tags_ 来安装特定版本的 Rancher。
+
+### Server 标签
+
+Rancher Server 以 Docker 镜像的形式分发并附有标签。你可以在输入命令部署 Rancher 时指定标签。请记住,如果你指定了标签,但是没有指定版本(如 `latest` 或 `stable`),你必须显式拉取该镜像标签的新版本。否则,将使用缓存在主机上的镜像。
+
+| 标签 | 描述 |
+| -------------------------- | ------ |
+| `rancher/rancher:latest` | 最新的开发版本。这些版本通过了我们的 CI 自动化验证。不推荐在生产环境使用这些版本。 |
+| `rancher/rancher:stable` | 最新的稳定版本。推荐将此标签用于生产环境。 |
+| `rancher/rancher:v2.X.X` | 你可以使用以前版本中的标签来指定要安装的 Rancher 版本。访问 DockerHub 查看可用的版本。 |
+
+:::note
+
+- `master` 和带有 `-rc` 或其他后缀的标签是供 Rancher 测试团队验证用的。这些标签不受官方支持,因此请不要使用这些标签。
+- 安装 alpha 版本进行预览:使用我们的[公告页面](https://forums.rancher.com/c/announcements)中列出的 alpha 标签(例如,`v2.2.0-alpha1`)进行安装。不支持升级或升级到 Alpha 版本。
+
+:::
+
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md
new file mode 100644
index 00000000000..d454197c8a5
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md
@@ -0,0 +1,24 @@
+---
+title: 自定义 CA 根证书
+---
+
+如果你在内部生产环境使用 Rancher,且不打算公开暴露应用,你可以使用私有 CA 颁发的证书。
+
+Rancher 可能会访问配置了自定义/内部 CA 根证书(也称为自签名证书)的服务。如果 Rancher 无法验证服务的证书,则会显示错误信息 `x509: certificate signed by unknown authority`。
+
+如需验证证书,你需要把 CA 根证书添加到 Rancher。由于 Rancher 是用 Go 编写的,我们可以使用环境变量 `SSL_CERT_DIR` 指向容器中 CA 根证书所在的目录。启动 Rancher 容器时,可以使用 Docker 卷选项(`-v host-source-directory:container-destination-directory`)来挂载 CA 根证书目录。
+
+Rancher 可以访问的服务示例:
+
+- 应用商店
+- 验证提供程序
+- 使用 Node Driver 访问托管/云 API
+
+## 使用自定义 CA 证书安装
+
+有关启动挂载了私有 CA 证书的 Rancher 容器的详情,请参见安装文档:
+
+- [Docker 安装的自定义 CA 证书选项](../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#自定义-ca-证书)
+
+- [Kubernetes 安装的其他受信 CA 选项](../installation-references/helm-chart-options.md#额外的授信-ca)
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/helm-version-requirements.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/helm-version-requirements.md
new file mode 100644
index 00000000000..9a1fc4d1078
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/helm-version-requirements.md
@@ -0,0 +1,14 @@
+---
+title: Helm 版本要求
+---
+
+本文介绍 Helm 的要求。Helm 是用于把 Rancher 安装在高可用 Kubernetes 集群上的工具。
+
+> 我们已针对 Helm 3 更新了安装指南。如果你使用 Helm 2 进行安装,请参见 [Helm 2 迁移到 Helm 3 文档](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/)。[本文](/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm2.md)提供了较早的使用 Helm 2 的 Rancher 高可用安装指南的副本。如果你无法升级到 Helm 3,可以使用这个说明安装。
+
+
+
+- 如需安装或升级 Rancher 2.5,请使用 Helm 3.2.x 或更高版本。
+- Kubernetes 1.16 要求 Helm 2.16.0 或更高版本。如果使用的是默认 Kubernetes 版本,请参见[发行说明](https://github.com/rancher/rke/releases)获取所使用的 RKE 版本。
+- 请不要使用 Helm 2.15.0,因为这个版本有转换/比较数字的问题。
+- 请不要使用 Helm 2.12.0,因为该版本有 `cert-manager` 的兼容问题。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/local-system-charts.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/local-system-charts.md
new file mode 100644
index 00000000000..17c965ad095
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/local-system-charts.md
@@ -0,0 +1,13 @@
+---
+title: 离线安装中设置本地 System Charts
+---
+
+[Charts](https://github.com/rancher/charts) 仓库包含 Monitoring、Logging、告警和 Istio 等功能所需的所有 Helm 目录项。
+
+在 Rancher 的离线安装中,你需要配置 Rancher 以使用 System Charts 的本地副本。本节介绍如何通过 CLI 标志使用本地 System Charts。
+
+## 使用本地 System Charts
+
+`system-charts` 的一个本地副本已经打包到 `rancher/rancher` 容器中。为了在离线安装中使用这些功能,你需要使用额外的环境变量 `CATTLE_SYSTEM_CATALOG=bundled` 来运行 Rancher 安装命令,该环境变量告诉 Rancher 使用 Chart 的本地副本,而不是尝试从 GitHub 获取 Chart。
+
+带有 `system-charts` 的 Rancher 安装命令示例包含在 Docker 和 Helm 的[离线安装说明](../other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/resources.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/resources.md
new file mode 100644
index 00000000000..2862aba2cd2
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/resources.md
@@ -0,0 +1,25 @@
+---
+title: 资源
+---
+
+### Docker 安装
+
+[单节点 Docker 安装](../other-installation-methods/rancher-on-a-single-node-with-docker/rancher-on-a-single-node-with-docker.md)适用于想要测试 Rancher 的用户。你无需使用 Helm 在 Kubernetes 集群上运行 Rancher,你可以使用 `docker run` 命令,把 Rancher Server 组件安装到单个节点上。
+
+由于只有一个节点和一个 Docker 容器,因此,如果该节点发生故障,由于其他节点上没有可用的 etcd 数据副本,你将丢失 Rancher Server 的所有数据。
+
+### 离线安装
+
+按照[以下步骤](../other-installation-methods/air-gapped-helm-cli-install/air-gapped-helm-cli-install.md)在离线环境中安装 Rancher Server。
+
+离线环境可以是 Rancher Server 离线安装、防火墙后面或代理后面。
+
+### 高级选项
+
+安装 Rancher 时,有如下几个可开启的高级选项:每个安装指南中都提供了对应的选项。了解选项详情:
+
+- [自定义 CA 证书](custom-ca-root-certificates.md)
+- [API 审计日志](../../../how-to-guides/advanced-user-guides/enable-api-audit-log.md)
+- [TLS 设置](../installation-references/tls-settings.md)
+- [etcd 配置](../../../how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md)
+- [离线安装 Local System Chart](local-system-charts.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
new file mode 100644
index 00000000000..c5eb98f8aaf
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md
@@ -0,0 +1,263 @@
+---
+title: 更新 Rancher 证书
+---
+
+## 更新私有 CA 证书
+
+按照以下步骤轮换[安装在 Kubernetes 集群上](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)、由 Rancher 使用的 SSL 证书和私有 CA,或转用由私有 CA 签发的 SSL 证书。
+
+步骤概述:
+
+1. 使用新证书和私钥创建或更新 `tls-rancher-ingress` Kubernetes Secret 对象。
+1. 使用根 CA 证书创建或更新 `tls-ca` Kubernetes Secret 对象(仅在使用私有 CA 时需要)。
+1. 使用 Helm CLI 更新 Rancher 安装。
+1. 重新配置 Rancher Agent 以信任新的 CA 证书。
+1. 选择 Fleet 集群的强制更新,来将 fleet-agent 连接到 Rancher。
+
+各个步骤的详细说明如下。
+
+### 1. 创建/更新证书 Secret 对象
+
+首先,将服务器证书和所有中间证书合并到名为 `tls.crt` 的文件,并在名为 `tls.key` 的文件中提供相应的证书密钥。
+
+使用以下命令在 Rancher(本地)管理集群中创建 `tls-rancher-ingress` Secret 对象:
+
+```bash
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+ --cert=tls.crt \
+ --key=tls.key
+```
+
+或者,更新现有的 `tls-rancher-ingress` Secret:
+
+```bash
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+ --cert=tls.crt \
+ --key=tls.key \
+ --dry-run --save-config -o yaml | kubectl apply -f -
+```
+
+### 2. 创建/更新证书 CA Secret 对象
+
+如果新证书是由私有 CA 签发的,你需要将相应的根 CA 证书复制到名为 `cacerts.pem` 的文件中,并创建或更新 `cattle-system` 命名空间中的 `tls-ca` Secret。如果证书由中间 CA 签名,则 `cacerts.pem` 必须按顺序同时包含中间 CA 证书和根 CA 证书。
+
+创建初始 `tls-ca` Secret:
+
+```bash
+kubectl -n cattle-system create secret generic tls-ca \
+ --from-file=cacerts.pem
+```
+
+要更新现有的 `tls-ca` Secret:
+
+```bash
+kubectl -n cattle-system create secret generic tls-ca \
+ --from-file=cacerts.pem \
+ --dry-run --save-config -o yaml | kubectl apply -f -
+```
+
+### 3. 重新配置 Rancher 部署
+
+如果证书源保持不变(例如,`secret`),请按照步骤 3a 中的步骤操作。
+
+但是,如果证书源发生变化(例如,`letsEncrypt` 更改为 `secret`),请按照 3b 中的步骤操作。
+
+#### 3a. 重新部署 Rancher pod
+
+当证书源保持不变,但需要更新 CA 证书时,需要执行此步骤。
+
+在这种情况下,由于 `tls-ca` secret 在启动时由 Rancher pod 读取,因此你需要重新部署 Rancher pod。
+
+你可以运行以下命令重新部署 Rancher pod:
+```bash
+kubectl rollout restart deploy/rancher -n cattle-system
+```
+
+修改完成后,访问 `https://<RANCHER_HOSTNAME>/v3/settings/cacerts`,验证该值是否与先前写入 `tls-ca` Secret 中的 CA 证书匹配。在所有重新部署的 Rancher pod 启动之前,CA `cacerts` 值可能不会更新。
+
+#### 3b. 更新 Rancher 的 Helm 值
+
+如果证书源有更改,则需要执行此步骤。如果你的 Rancher 之前使用默认的自签名证书 (`ingress.tls.source=rancher`) 或 Let's Encrypt (`ingress.tls.source=letsEncrypt`) 证书,并且现在正在使用由私有 CA (`ingress.tls.source=secret`) 签名的证书。
+
+以下步骤更新了 Rancher Chart 的 Helm 值,因此 Rancher pod 和 ingress 会使用在步骤 1 和 2 中创建的新私有 CA 证书。
+
+1. 调整初始安装期间使用的值,将当前值存储为:
+```bash
+helm get values rancher -n cattle-system -o yaml > values.yaml
+```
+1. 检索当前部署的 Rancher Chart 的版本字符串:
+```bash
+helm ls -n cattle-system
+```
+1. 更新 `values.yaml` 文件中的当前 Helm 值以包含下方内容:
+```yaml
+ingress:
+ tls:
+ source: secret
+privateCA: true
+```
+:::note 重要:
+由于证书由私有 CA 签发,因此确保在 `values.yaml` 文件中设置了 [`privateCA: true`](../installation-references/helm-chart-options.md#常用选项) 是非常重要的。
+:::
+1. 使用 `values.yaml` 文件和当前 Chart 版本升级 Helm 应用程序实例。版本必须匹配以防止 Rancher 升级。
+```bash
+ helm upgrade rancher rancher-stable/rancher \
+ --namespace cattle-system \
+ -f values.yaml \
+ --version <CHART_VERSION>
+```
+
+修改完成后,访问 `https://<RANCHER_HOSTNAME>/v3/settings/cacerts`,验证该值是否与先前写入 `tls-ca` Secret 中的 CA 证书匹配。在所有 Rancher pod 启动之前,CA `cacerts` 值可能不会更新。
+
+### 4. 重新配置 Rancher Agent 以信任私有 CA
+
+本节介绍了重新配置 Rancher Agent 以信任私有 CA 的三种方法。如果你的实际情况符合以下任意一个条件,请执行此步骤:
+
+- Rancher 在先前的配置中使用了 Rancher 自签名证书 (`ingress.tls.source=rancher`) 或 Let's Encrypt 证书 (`ingress.tls.source=letsEncrypt`)。
+- 该证书由不同的私有 CA 签发
+
+#### 为什么要执行这一步骤?
+
+如果 Rancher 配置了私有 CA 签名的证书时,CA 证书链将受到 Rancher agent 容器的信任。Agent 会对下载证书的校验和及 `CATTLE_CA_CHECKSUM` 环境变量进行比较。换言之,如果 Rancher 使用的私有 CA 证书发生变化,环境变量 `CATTLE_CA_CHECKSUM` 必须相应更新。
+
+#### 可使用的方法
+
+- 方法 1(最简单的方法):在轮换证书后将所有集群连接到 Rancher。适用于更新或重新部署 Rancher 部署(步骤 3)后立即执行的情况。
+
+- 方法 2:适用于集群与 Rancher 失去连接,但所有集群都启用了 [Authorized Cluster Endpoint](../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md) (ACE) 的情况。
+
+- 方法 3:如果方法 1 和 2 不可行,则可使用方法 3 进行回退。
+
+#### 方法 1:强制重新部署 Rancher Agent
+
+对于每个下游集群,使用 Rancher(本地)管理集群的 Kubeconfig 文件运行以下命令。
+
+```bash
+kubectl annotate clusters.management.cattle.io <CLUSTER_ID> io.cattle.agent.force.deploy=true
+```
+
+:::note
+找到下游集群的集群 ID (c-xxxxx)。你可以在 Rancher UI 的**集群管理**中查看集群时在浏览器 URL 中找到 ID。
+:::
+
+此命令将使 Agent 清单重新应用新证书的校验和。
+
+#### 方法 2:手动更新校验和环境变量
+
+将 `CATTLE_CA_CHECKSUM` 环境变量更新为匹配新 CA 证书校验和的值,从而手动为 Agent Kubernetes 对象打上补丁。通过以下操作生成新的校验和:
+
+```bash
+curl -k -s -fL https://<RANCHER_HOSTNAME>/v3/settings/cacerts | jq -r .value | sha256sum | awk '{print $1}'
+```
+
+为每个下游集群使用 Kubeconfig 更新两个 Agent 部署的环境变量。如果集群启用了 [ACE](../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md),你可以[调整 kubectl 上下文](../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证),从而直接连接到下游集群。
+
+```bash
+kubectl edit -n cattle-system ds/cattle-node-agent
+kubectl edit -n cattle-system deployment/cattle-cluster-agent
+```
+
+#### 方法 3:手动重新部署 Rancher agent
+
+该方法通过在每个下游集群的 control plane 节点上运行一组命令,从而重新应用 Rancher agent。
+
+对每个下游集群重复以下步骤:
+
+1. 检索 agent 注册 kubectl 命令:
+ 1. 找到下游集群的集群 ID (c-xxxxx)。你可以在 Rancher UI 的**集群管理**中查看集群时在浏览器 URL 中找到 ID。
+ 1. 将 Rancher Server URL 和集群 ID 添加到以下 URL:`https://<RANCHER_SERVER_URL>/v3/clusterregistrationtokens?clusterId=<CLUSTER_ID>`。
+ 1. 复制 `insecureCommand` 字段中的命令,使用此命令是因为未使用私有 CA。
+
+2. 使用以下其中一种方法,使用 kubeconfig 为下游集群运行上一步中的 kubectl 命令:
+ 1. 如果集群启用了 [ACE](../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md),你可以[调整上下文](../../../how-to-guides/new-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#直接使用下游集群进行身份验证),从而直接连接到下游集群。
+ 1. 或者,SSH 到 control plane 节点:
+ - RKE:使用[此处文档中的步骤](https://github.com/rancherlabs/support-tools/tree/master/how-to-retrieve-kubeconfig-from-custom-cluster)生成 kubeconfig
+ - RKE2/K3s:使用安装时填充的 kubeconfig
+
+### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
+
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet/overview.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+
+#### 为什么要执行这一步骤?
+
+Rancher 管理的集群中的 Fleet agent 存储了用于连接到 Rancher 的 kubeconfig。kubeconfig 包含一个 `certificate-authority-data` 字段,该字段包含 Rancher 使用的证书的 CA。更改 CA 时,你需要更新此块来允许 fleet-agent 信任 Rancher 使用的证书。
+
+## 将私有 CA 证书更改为公共证书
+
+按照以下步骤执行与上面相反的操作,将私有 CA 颁发的证书更改为公共或自签名 CA。
+
+### 1. 创建/更新证书 Secret 对象
+
+首先,将服务器证书和所有中间证书合并到名为 `tls.crt` 的文件,并在名为 `tls.key` 的文件中提供相应的证书密钥。
+
+使用以下命令在 Rancher(本地)管理集群中创建 `tls-rancher-ingress` Secret 对象:
+
+```bash
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+ --cert=tls.crt \
+ --key=tls.key
+```
+
+或者,更新现有的 `tls-rancher-ingress` Secret:
+
+```bash
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+ --cert=tls.crt \
+ --key=tls.key \
+ --dry-run --save-config -o yaml | kubectl apply -f -
+```
+
+### 2. 删除 CA 证书 Secret 对象
+
+你需要删除 `cattle-system` 命名空间中的 `tls-ca secret`(不再需要它)。如果需要,你还可以选择保存 `tls-ca` secret 的副本。
+
+要保存现有的 `tls-ca` Secret:
+
+```bash
+kubectl -n cattle-system get secret tls-ca -o yaml > tls-ca.yaml
+```
+
+要删除现有的 `tls-ca` 密文:
+
+```bash
+kubectl -n cattle-system delete secret tls-ca
+```
+
+### 3. 重新配置 Rancher 部署
+
+如果证书源有更改,则需要执行此步骤。在这种情况下,它变化的原因很可能是因为 Rancher 之前配置为使用默认的自签名证书 (`ingress.tls.source=rancher`)。
+
+以下步骤更新了 Rancher Chart 的 Helm 值,因此 Rancher pod 和 Ingress 会使用在步骤 1 中创建的新证书。
+
+1. 调整初始安装期间使用的值,将当前值存储为:
+```bash
+helm get values rancher -n cattle-system -o yaml > values.yaml
+```
+1. 获取当前部署的 Rancher Chart 的版本字符串:
+```bash
+helm ls -n cattle-system
+```
+1. 更新 `values.yaml` 文件中的当前 Helm 值:
+ 1. 由于不再使用私有 CA,删除 `privateCA: true` 字段,或将其设置为 `false`。
+ 1. 根据需要调整 `ingress.tls.source` 字段。有关更多信息,请参阅 [Chart 选项](../installation-references/helm-chart-options.md#常用选项)。以下是一些示例:
+ 1. 如果使用公共 CA,继续使用 `secret`
+ 1. 如果使用 Let's Encrypt,将值更新为 `letsEncrypt`
+1. 使用 `values.yaml` 文件更新 Rancher Chart 的 Helm 值,并使用当前 Chart 版本防止升级:
+```bash
+ helm upgrade rancher rancher-stable/rancher \
+ --namespace cattle-system \
+ -f values.yaml \
+ --version <CHART_VERSION>
+```
+
+### 4. 为非私有/通用证书重新配置 Rancher Agent
+
+由于不再使用私有 CA,因此你需要删除下游集群 agent 上的 `CATTLE_CA_CHECKSUM` 环境变量,或将其设置为 ""(空字符串)。
+
+### 5. 强制更新 Fleet 集群,从而将 fleet-agent 重新连接到 Rancher
+
+在 Rancher UI 的[持续交付](../../../integrations-in-rancher/fleet/overview.md#在-rancher-ui-中访问-fleet)中,为集群选择“强制更新”,来允许下游集群中的 fleet-agent 成功连接到 Rancher。
+
+#### 为什么要执行这一步骤?
+
+Rancher 管理的集群中的 Fleet agent 存储了用于连接到 Rancher 的 kubeconfig。kubeconfig 包含一个 `certificate-authority-data` 字段,该字段包含 Rancher 使用的证书的 CA。更改 CA 时,你需要更新此块来允许 fleet-agent 信任 Rancher 使用的证书。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md
new file mode 100644
index 00000000000..f8a6cb3d1a0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md
@@ -0,0 +1,284 @@
+---
+title: 升级 Cert-Manager
+---
+
+Rancher 适配 API 版本 `cert-manager.io/v1` 并且在 cert-manager v1.13.1 版本上进行了测试。
+
+Rancher 使用 cert-manager 为 Rancher 高可用部署自动生成和续期 TLS 证书。从 2019 秋季开始,cert-manager 发生了以下的三个重要变更。如果你在此时间段前创建了 Rancher 高可用部署,请进行相关操作。
+
+1. [从 2019 年 11 月 1 日开始,Let's Encrypt 已阻止低于 0.8.0 的 cert-manager 实例。](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753)
+1. [Cert-manager 正在弃用和替换 certificate.spec.acme.solvers 字段](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/)。此更改暂时没有确切的截止日期。
+1. [Cert-manager 正在弃用 `v1alpha1` API 和替换它的 API 组](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/)。
+
+为了帮助你应对这些变化,本文将:
+
+1. 提供升级 cert-manager 步骤的文档。
+1. 阐述 cert-manager API 的变更,并提供 cert-manager 官方文档的链接,助你实现数据迁移。
+
+:::note 重要提示:
+
+如果你要将 cert-manager 从早于 1.5 的版本升级到最新版本,请按照以下[选项 C](#选项-c升级-15-及以下版本的-cert-manager) 中的步骤进行操作。请注意,你无需重新安装 Rancher 即可执行此升级。
+
+:::
+
+## 升级 Cert-Manager
+
+以下说明中使用的命名空间取决于当前安装 cert-manager 的命名空间。如果它安装在 kube-system 命名空间中,请在以下说明步骤中使用该命名空间。你可以运行 `kubectl get pods --all-namespaces` 来验证,并检查 cert-manager-\* pods 列在哪个命名空间中。不要更改运行 cert-manager 的命名空间,否则可能会出现错误。
+
+要升级 cert-manager,请遵循步骤操作。
+
+### 选项 A:联网升级 cert-manager
+
+
+ 单击展开
+
+1. [备份现有资源](https://cert-manager.io/docs/tutorials/backup/):
+
+ ```plain
+ kubectl get -o yaml --all-namespaces \
+ issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml
+ ```
+
+ :::note 重要提示:
+
+ 如果你从低于 0.11.0 的版本升级,请将所有备份资源上的 apiVersion 从 `certmanager.k8s.io/v1alpha1` 升级到 `cert-manager.io/v1alpha2`。如果你需要在其他资源上使用 cert-manager 注释,请对其进行更新以反映新的 API 组。详情请参见[附加注释变更](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes)。
+
+ :::
+
+1. [卸载现有部署](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm):
+
+ ```plain
+ helm uninstall cert-manager
+ ```
+
+ 使用你安装的 vX.Y.Z 版本的链接删除 CustomResourceDefinition:
+
+ ```plain
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml
+
+ ```
+
+1. 单独安装 CustomResourceDefinition 资源:
+
+ ```plain
+ kubectl apply --validate=false -f https://github.com/cert-manager/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml
+
+ ```
+
+ :::note
+
+ 如果你运行的 Kubernetes 版本是 1.15 或更低版本,你需要在以上的 `kubectl apply` 命令中添加 `--validate=false`。否则你将看到 cert-manager CRD 资源中的 `x-kubernetes-preserve-unknown-fields` 字段校验错误提示。这是 kubectl 执行资源校验方式产生的良性错误。
+
+ :::
+
+1. 根据需要为 cert-manager 创建命名空间:
+
+ ```plain
+ kubectl create namespace cert-manager
+ ```
+
+1. 添加 Jetstack Helm 仓库:
+
+ ```plain
+ helm repo add jetstack https://charts.jetstack.io
+ ```
+
+1. 更新 Helm Chart 仓库本地缓存:
+
+ ```plain
+ helm repo update
+ ```
+
+1. 安装新版本的 cert-manager:
+
+ ```plain
+ helm install \
+ cert-manager jetstack/cert-manager \
+ --namespace cert-manager
+ ```
+
+1. [恢复备份资源](https://cert-manager.io/docs/tutorials/backup/#restoring-resources):
+
+ ```plain
+ kubectl apply -f cert-manager-backup.yaml
+ ```
+
+
+
+### 选项 B:在离线环境中升级 Cert-Manager
+
+
+ 单击展开
+
+### 先决条件
+
+在执行升级之前,先将所需的容器镜像添加到私有镜像仓库中,并下载/渲染所需的 Kubernetes manifest 文件来准备离线环境。
+
+1. 参见[准备私有镜像仓库](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md)指南,将升级所需的镜像推送到镜像仓库。
+
+1. 在可以连接互联网的系统中,将 cert-manager 仓库添加到 Helm:
+
+ ```plain
+ helm repo add jetstack https://charts.jetstack.io
+ helm repo update
+ ```
+
+1. 从 [Helm Chart 仓库](https://artifacthub.io/packages/helm/cert-manager/cert-manager)中获取最新可用的 cert-manager Chart:
+
+ ```plain
+ helm fetch jetstack/cert-manager
+ ```
+
+1. 使用安装 Chart 的选项来渲染 cert-manager 模板。记住要设置 `image.repository` 选项,以从你的私有镜像仓库拉取镜像。此操作会创建一个包含 Kubernetes manifest 文件的 `cert-manager` 目录。
+
+ Helm 3 命令如下:
+
+ ```plain
+    helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \
+    --namespace cert-manager \
+    --set image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-controller \
+    --set webhook.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-webhook \
+    --set cainjector.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-cainjector
+ ```
+
+
+
+ Helm 2 命令如下:
+
+ ```plain
+    helm template ./cert-manager-v0.12.0.tgz --output-dir . \
+    --name cert-manager --namespace cert-manager \
+    --set image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-controller \
+    --set webhook.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-webhook \
+    --set cainjector.image.repository=<REGISTRY.YOURDOMAIN.COM:PORT>/quay.io/jetstack/cert-manager-cainjector
+ ```
+
+1. 下载新旧版 cert-manager 所需的 CRD 文件:
+
+ ```plain
+    curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/cert-manager/cert-manager/release-0.12/deploy/manifests/00-crds.yaml
+ curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/cert-manager/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml
+ ```
+
+### 安装 cert-manager
+
+1. 备份现有资源:
+
+ ```plain
+ kubectl get -o yaml --all-namespaces \
+ issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml
+ ```
+
+ :::note 重要提示:
+
+ 如果你从低于 0.11.0 的版本升级,请将所有备份资源上的 apiVersion 从 `certmanager.k8s.io/v1alpha1` 升级到 `cert-manager.io/v1alpha2`。如果你需要在其他资源上使用 cert-manager 注释,请对其进行更新以反映新的 API 组。详情请参见[附加注释变更](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes)。
+
+ :::
+
+1. 删除现有的 cert-manager 安装包:
+
+ ```plain
+ kubectl -n cert-manager \
+ delete deployment,sa,clusterrole,clusterrolebinding \
+ -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2'
+ ```
+
+ 使用你安装的 vX.Y 版本的链接删除 CustomResourceDefinition:
+
+ ```plain
+ kubectl delete -f cert-manager/cert-manager-crd-old.yaml
+ ```
+
+1. 单独安装 CustomResourceDefinition 资源:
+
+ ```plain
+ kubectl apply -f cert-manager/cert-manager-crd.yaml
+ ```
+
+ :::note 重要提示:
+
+ 如果你运行的 Kubernetes 版本是 1.15 或更低版本,你需要在以上的 `kubectl apply` 命令中添加 `--validate=false`。否则你将看到 cert-manager CRD 资源中的 `x-kubernetes-preserve-unknown-fields` 字段校验错误提示。这是 kubectl 执行资源校验方式产生的良性错误。
+
+ :::
+
+1. 为 cert-manager 创建命名空间:
+
+ ```plain
+ kubectl create namespace cert-manager
+ ```
+
+1. 安装 cert-manager
+
+ ```plain
+ kubectl -n cert-manager apply -R -f ./cert-manager
+ ```
+
+1. [恢复备份资源](https://cert-manager.io/docs/tutorials/backup/#restoring-resources):
+
+ ```plain
+ kubectl apply -f cert-manager-backup.yaml
+ ```
+
+
+
+### 选项 C:升级 1.5 及以下版本的 cert-manager
+
+
+ 单击展开
+
+以前,要升级旧版本的 cert-manager,我们建议卸载并重新安装 Rancher。使用以下方法,你可以升级 cert-manager 而无需执行此额外步骤,从而更好地保护你的生产环境:
+
+1. 按照[安装指南](https://cert-manager.io/docs/usage/cmctl/#installation)安装 `cmctl`(cert-manager CLI 工具)。
+
+1. 确保所有以已弃用的 API 版本存储在 etcd 中的 cert-manager 自定义资源都迁移到 v1:
+
+ ```
+ cmctl upgrade migrate-api-version
+ ```
+    有关详细信息,请参阅 [API 版本迁移文档](https://cert-manager.io/docs/usage/cmctl/#migrate-api-version)。另请参阅[将 1.5 升级到 1.6](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/) 和[将 1.6 升级到 1.7](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/)。
+
+1. 正常使用 `helm upgrade` 将 cert-manager 升级到 1.7.1。如果需要,你可以直接从版本 1.5 转到 1.7。
+
+1. 按照 Helm 教程[更新发布清单的 API 版本](https://helm.sh/docs/topics/kubernetes_apis/#updating-api-versions-of-a-release-manifest)。Chart 发布名称为 `release_name=rancher`,发布命名空间为 `release_namespace=cattle-system`。
+
+1. 在解码后的文件中,搜索 `cert-manager.io/v1beta1` 并将其**替换**为 `cert-manager.io/v1`。
+
+1. 使用 `helm upgrade` 正常升级 Rancher。
+
+
+
+### 验证部署
+
+安装完 cert-manager 后,你可以通过检查 cert-manager 命名空间中正在运行的 Pod 来验证它是否已正确部署:
+
+```
+kubectl get pods --namespace cert-manager
+
+NAME READY STATUS RESTARTS AGE
+cert-manager-5c6866597-zw7kh 1/1 Running 0 2m
+cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m
+cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m
+```
+
+## Cert-Manager API 变更和数据迁移
+
+---
+
+Rancher 现在支持 cert-manager 1.6.2 和 1.7.1。推荐使用 v1.7.x,因为 v1.6.x 将在 2022 年 3 月 30 日结束生命周期。详情请参见 [cert-manager 文档](../install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)。有关将 cert-manager 从 1.5 升级到 1.6 的说明,请参见上游的 [cert-manager 文档](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/)。有关将 cert-manager 从 1.6 升级到 1.7 的说明,请参见上游的 [cert-manager 文档](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/)。
+
+---
+
+Cert-manager 已经弃用 `certificate.spec.acme.solvers` 字段,而且会在未来的版本中放弃对该字段的支持。
+
+根据 cert-manager 文档,v0.8 引入了配置 ACME 证书资源的新格式。具体来说,就是移动了 challenge solver 字段。v0.9 新旧格式均支持。请知悉,之后发布的新 cert-manager 版本会放弃对旧格式的支持。Cert-Manager 文档建议你在更新后,将 ACME 颁发者和证书资源更新到新格式。
+
+如需了解变更细节以及迁移说明,请参见[将 Cert-Manager 从 v0.7 升级到 v0.8](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/)。
+
+v0.11 版本标志着删除先前 Cert-Manager 版本中使用的 v1alpha1 API,以及将 API 组从 certmanager.k8s.io 更改到 cert-manager.io。
+
+此外,我们已不再支持 v0.8 版本中已弃用的旧配置格式。换言之,在升级到 v0.11 之前,你必须先为 ACME 发行者使用新的 solver 样式配置格式作为过渡。详情请参见[升级到 v0.8](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/)。
+
+如需了解变更细节以及迁移说明,请参见[将 Cert-Manager 从 v0.10 升级到 v0.11](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/)。
+
+如需获得更多信息,请参见 [Cert-Manager 升级](https://cert-manager.io/docs/installation/upgrade/)。
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md
new file mode 100644
index 00000000000..b23f7651dd3
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md
@@ -0,0 +1,126 @@
+---
+title: 升级和回滚 Kubernetes
+---
+
+升级到最新版本的 Rancher 之后,下游 Kubernetes 集群可以升级为 Rancher 支持的最新的 Kubernetes 版本。
+
+Rancher 使用 RKE(Rancher Kubernetes Engine)来预置和编辑 RKE 集群。有关为 RKE 集群配置升级策略的更多信息,请参阅 [RKE 文档](https://rancher.com/docs/rke/latest/en/)。
+
+
+## 经过测试的 Kubernetes 版本
+
+Rancher 在发布新版本之前,会对其与 Kubernetes 的最新次要版本进行测试,以确保兼容性。有关各个 Rancher 版本测试了哪些 Kubernetes 版本的详细信息,请参阅[支持维护条款](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/)。
+
+## 升级的工作原理
+
+RKE v1.1.0 改变了集群升级的方式。
+
+在 [RKE 文档](https://rancher.com/docs/rke/latest/en/upgrades/how-upgrades-work)中,你将了解编辑或升级 RKE Kubernetes 集群时会发生的情况。
+
+
+## 升级的最佳实践
+
+在升级集群的 Kubernetes 版本时,我们建议你:
+
+1. 拍一张快照。
+1. 启动 Kubernetes 升级。
+1. 如果升级失败,请将集群恢复到升级前的 Kubernetes 版本。这可以通过选择**恢复 etcd 和 Kubernetes 版本**选项来实现。此选项会先将集群恢复到升级前的 Kubernetes 版本,然后再恢复 etcd 快照。
+
+恢复操作将在不处于健康或 active 状态的集群上运行。
+
+## 升级 Kubernetes 版本
+
+:::note 先决条件:
+
+- 以下选项适用于 [Rancher 启动的 Kubernetes 集群](../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)和[注册的 K3s Kubernetes 集群](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#已注册-rke2-和-k3s-集群的附加功能)。
+- 以下选项也适用于导入且已注册的 RKE2 集群。如果你从外部云平台导入集群但不注册,你将无法在 Rancher 中升级 Kubernetes 版本。
+- 在升级 Kubernetes 之前,先[备份你的集群](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md)。
+
+:::
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面中,进入要升级的集群,然后点击 **⋮ > 编辑配置**。
+1. 从 **Kubernetes 版本** 下拉列表中,选择要用于集群的 Kubernetes 版本。
+1. 单击**保存**。
+
+**结果**:已开始为集群升级 Kubernetes。
+
+## 回滚
+
+你可以将集群恢复到使用先前 Kubernetes 版本的备份。有关详细信息,请参阅:
+
+- [备份集群](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md#快照工作原理)
+- [使用备份恢复集群](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md#使用快照恢复集群)
+
+## 配置升级策略
+
+从 RKE v1.1.0 开始,我们提供了额外的升级选项,让你更精细地控制升级过程。如果满足[条件和要求](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability),你可以使用这些选项,从而在集群升级期间维持应用的可用性。
+
+你可以在 Rancher UI 中配置升级策略,也可以通过编辑 `cluster.yml` 来配置策略。编辑 `cluster.yml` 可以配置更多高级选项。
+
+### 在 Rancher UI 中配置最大不可用的 Worker 节点
+
+你可以在 Rancher UI 中配置不可用 worker 节点的最大数量。在集群升级期间,worker 节点将按此大小批量升级。
+
+默认情况下,不可用 worker 节点的最大数量为所有 worker 节点的 10%。此数字可以配置为百分比或整数。当定义为百分比时,批大小会被四舍五入到最近的节点,最小为一个节点。
+
+要更改 worker 节点的默认数量或百分比:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面中,进入要升级的集群,然后点击 **⋮ > 编辑配置**。
+1. 在**升级策略**选项卡中,输入 **Worker 并发**作为固定的数字或百分比。你可以通过将集群中的节点数减去最大不可用节点数来获取该数字。
+1. 单击**保存**。
+
+**结果**:集群更新为使用新的升级策略。
+
+### 使用 Rancher UI 在升级期间启用节点清空
+
+默认情况下,RKE 会在升级之前[封锁](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration)每个节点。默认情况下,[清空](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)会在升级期间被禁用。如果在集群配置中启用了清空,RKE 将在升级之前对节点进行封锁和清空。
+
+要在集群升级期间清空每个节点:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面中,进入要启用节点清空的集群,然后点击 **⋮ > 编辑配置**。
+1. 单击 **⋮ > 编辑**。
+1. 在**升级策略**选项卡中,转到**清空节点**字段并单击**是**。controlplane 和 worker 节点的清空是单独配置的。
+1. 配置如何删除 pod 的选项。有关每个选项的详细信息,请参阅[本节](../../how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md#激进和安全的清空选项)。
+1. (可选)配置宽限期。宽限期是给每个 pod 进行清理的超时时间,能让 pod 有机会优雅地退出。Pod 可能需要完成任何未完成的请求、回滚事务或将状态保存到某些外部存储。如果该值为负数,将使用 pod 中指定的默认值。
+1. (可选)配置超时,这是在清空放弃之前应该继续等待的时间。
+1. 单击**保存**。
+
+**结果**:集群更新为使用新的升级策略。
+
+:::note
+
+目前存在一个[已知问题](https://github.com/rancher/rancher/issues/25478),即使 etcd 和 controlplane 正在被清空,Rancher UI 也不会将它们的状态显示为 drained。
+
+:::
+
+### 在升级期间维护应用的可用性
+
+_从 RKE v1.1.0 起可用_
+
+在 [RKE 文档](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability/)中,你将了解在升级集群时防止应用停机的要求。
+
+### 在 cluster.yml 中配置升级策略
+
+你通过编辑 `cluster.yml` 来获得更高级的升级策略配置选项。
+
+有关详细信息,请参阅 RKE 文档中的[配置升级策略](https://rancher.com/docs/rke/latest/en/upgrades/configuring-strategy)。这部分还包括一个用于配置升级策略的示例 `cluster.yml`。
+
+## 故障排除
+
+如果升级后节点没有出现,`rke up` 命令会出错。
+
+如果不可用节点的数量超过配置的最大值,则不会进行升级。
+
+如果升级停止,你可能需要修复不可用节点或将其从集群中删除,然后才能继续升级。
+
+失败的节点可能处于许多不同的状态:
+
+- 关机
+- 不可用
+- 用户在升级过程中清空了节点,因此节点上没有 kubelet
+- 升级本身失败
+
+如果在升级过程中达到最大不可用节点数,Rancher 的下游集群将停留在更新中的状态,并且不会继续升级其他 controlplane 节点。它将继续评估不可用的节点集,以防其中一个节点变得可用。如果无法修复节点,则必须移除节点才能继续升级。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
new file mode 100644
index 00000000000..3d01c38aed8
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md
@@ -0,0 +1,91 @@
+---
+title: 在不升级 Rancher 的情况下升级 Kubernetes
+---
+
+RKE 元数据功能允许你在新版本 Kubernetes 发布后立即为集群配置新版本,而无需升级 Rancher。此功能对于使用 Kubernetes 的补丁版本非常有用,例如,在原本支持 Kubernetes v1.14.6 的 Rancher Server 版本中,将 Kubernetes 升级到 v1.14.7。
+
+:::note
+
+Kubernetes API 可以在次要版本之间更改。因此,我们不支持引入 Kubernetes 次要版本,例如在 Rancher 支持 v1.14 的情况下引入 v1.15。在这种情况下,你需要升级 Rancher 以添加对 Kubernetes 次要版本的支持。
+
+:::
+
+Rancher 的 Kubernetes 元数据包含 Rancher 用于配置 [RKE 集群](../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的 Kubernetes 版本信息。Rancher 会定期同步数据并为 **系统镜像**、**服务选项**和**插件模板**创建自定义资源定义 (CRD)。因此,当新的 Kubernetes 版本与 Rancher Server 版本兼容时,Kubernetes 元数据可以使 Rancher 使用新版本来配置集群。元数据概述了 [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) 用于部署各种 Kubernetes 版本的信息。
+
+下表描述了受周期性数据同步影响的 CRD。
+
+:::note
+
+只有管理员可以编辑元数据 CRD。除非明确需要,否则建议不要更新现有对象。
+
+:::
+
+| 资源 | 描述 | Rancher API URL |
+|----------|-------------|-----------------|
+| 系统镜像 | 用于通过 RKE 部署 Kubernetes 集群的系统镜像列表。 | `/v3/rkek8ssystemimages` |
+| 服务选项 | 传递给 Kubernetes 组件的默认选项,例如 `kube-api`、`scheduler`、`kubelet`、`kube-proxy` 和 `kube-controller-manager` | `/v3/rkek8sserviceoptions` |
+| 插件模板 | 用于部署插件组件的 YAML 定义,例如 Canal、Calico、Flannel、Weave、Kube-dns、CoreDNS、`metrics-server`、`nginx-ingress` | `/v3/rkeaddons` |
+
+管理员可以通过配置 RKE 元数据设置来执行以下操作:
+
+- 刷新 Kubernetes 元数据。适用于有新的 Kubernetes 补丁版本发布,而用户希望在不升级 Rancher 的情况下为集群配置最新版本的 Kubernetes 的情景。
+- 更改 Rancher 用于同步元数据的 URL。适用于要让 Rancher 从本地同步而不是与 GitHub 同步的情况。这在离线环境下非常有用。
+- 防止 Rancher 自动同步元数据。这可以防止在 Rancher 中使用新的/不受支持的 Kubernetes 版本。
+
+### 刷新 Kubernetes 元数据
+
+默认情况下,管理员或具有**管理集群驱动**[全局角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)的用户,可以刷新 Kubernetes 元数据。
+
+要强制 Rancher 刷新 Kubernetes 元数据,可以执行手动刷新操作:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航菜单中,单击**驱动**。
+1. 单击**刷新 Kubernetes 元数据**。
+
+你可以将 `refresh-interval-minutes` 设置为 `0`(见下文),将 Rancher 配置为仅在需要时刷新元数据,并在需要时使用此按钮手动执行元数据刷新。
+
+### 配置元数据同步
+
+:::caution
+
+只有管理员可以更改这些设置。
+
+:::
+
+RKE 元数据的配置控制 Rancher 同步元数据的频率以及从何处下载数据。你可以通过 Rancher UI 或通过 Rancher API 端点 `v3/settings/rke-metadata-config` 配置元数据。
+
+元数据的配置方式取决于 Rancher 版本。
+
+要在 Rancher 中编辑元数据配置:
+
+1. 在左上角,单击 **☰ > 全局设置**。
+1. 转到 **rke-metadata-config**。单击 **⋮ > 编辑设置**。
+1. 你可以选择填写以下参数:
+
+- `refresh-interval-minutes`:Rancher 等待同步元数据的时间。如果要禁用定期刷新,请将 `refresh-interval-minutes` 设置为 0。
+- `url`:Rancher 从中获取数据的 HTTP 路径。该路径必须是 JSON 文件的直接路径。例如,Rancher v2.4 的默认 URL 是 `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`。
+1. 单击**保存**。
+
+如果你没有离线设置,则无需指定 Rancher 获取元数据的 URL,因为默认是从 [Rancher 的元数据 Git 仓库获取](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json)的。
+
+但是,如果你有[离线设置](#离线设置)需求,你需要将 Kubernetes 元数据仓库镜像到 Rancher 可用的位置。然后,你需要更改 URL 来指向 JSON 文件的新位置。
+
+### 离线设置
+
+Rancher Server 会定期刷新 `rke-metadata-config` 来下载新的 Kubernetes 版本元数据。有关 Kubernetes 和 Rancher 版本的兼容性表,请参阅[服务条款](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/)。
+
+如果你使用离线设置,则可能无法从 Rancher 的 Git 仓库自动定期刷新 Kubernetes 元数据。在这种情况下,应该禁用定期刷新以防止在日志中显示相关错误。或者,你可以配置元数据,以便 Rancher 与本地的 RKE 元数据副本进行同步。
+
+要将 Rancher 与 RKE 元数据的本地镜像同步,管理员需要配置 `rke-metadata-config` 来指向镜像。详情请参考[配置元数据同步](#配置元数据同步)。
+
+在将新的 Kubernetes 版本加载到 Rancher Server 中之后,需要执行其他步骤才能使用它们启动集群。Rancher 需要访问更新的系统镜像。虽然只有管理员可以更改元数据设置,但任何用户都可以下载 Rancher 系统镜像并为镜像准备私有容器镜像仓库。
+
+要下载私有镜像仓库的系统镜像:
+
+1. 点击左上角的 **☰**。
+1. 点击左侧导航底部的**简介**。
+1. 下载适用于 Linux 或 Windows 操作系统的镜像。
+1. 下载 `rancher-images.txt`。
+1. 使用[离线环境安装](other-installation-methods/air-gapped-helm-cli-install/publish-images.md)时使用的步骤准备私有镜像仓库,但不要使用发布页面中的 `rancher-images.txt`,而是使用上一个步骤中获取的文件。
+
+**结果**:Rancher 的离线安装现在可以同步 Kubernetes 元数据。如果你在发布新版本的 Kubernetes 时更新了私有镜像仓库,你可以使用新版本配置集群,而无需升级 Rancher。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/overview.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/overview.md
new file mode 100644
index 00000000000..be0cf58660c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/overview.md
@@ -0,0 +1,65 @@
+---
+title: 概述
+---
+
+Rancher 是一个为使用容器的公司打造的容器管理平台。Rancher 使得开发者可以随处运行 Kubernetes(Run Kubernetes Everywhere),满足 IT 需求规范,赋能 DevOps 团队。
+
+## Run Kubernetes Everywhere
+
+Kubernetes 已经成为容器编排标准。现在,大多数云和虚拟化提供商都提供容器编排服务。Rancher 用户可以选择使用 Rancher Kubernetes Engine(RKE)或云 Kubernetes 服务(例如 GKE、AKS 和 EKS)创建 Kubernetes 集群,还可以导入和管理使用任何 Kubernetes 发行版或安装程序创建的现有 Kubernetes 集群。
+
+## 满足 IT 需求规范
+
+Rancher 支持对其控制的所有 Kubernetes 集群进行集中认证、访问控制和监控。例如,你可以:
+
+- 使用你的 Active Directory 凭证访问由云提供商(例如 GKE)托管的 Kubernetes 集群。
+- 设置所有用户、组、项目、集群和云服务的权限控制策略和安全策略。
+- 一站式查看 Kubernetes 集群的运行状况和容量。
+
+## 赋能 DevOps 团队
+
+Rancher 为 DevOps 工程师提供简单直接的用户界面,以管理其应用负载。用户不需要对 Kubernetes 有非常深入的了解,即可使用 Rancher。Rancher 应用商店包含一套实用的 DevOps 开发工具。Rancher 获得了多种云原生生态系统产品的认证,包括安全工具、监控系统、容器镜像仓库、存储和网络驱动等。
+
+下图讲述了 Rancher 在 IT 管理团队和 DevOps 开发团队之间扮演的角色。DevOps 团队把他们的应用部署在他们选择的公有云或私有云上。IT 管理员负责查看并管理用户、集群、云服务的权限。
+
+
+
+## Rancher API Server 的功能
+
+Rancher API Server 是基于嵌入式 Kubernetes API Server 和 etcd 数据库建立的,它提供了以下功能:
+
+### 授权和基于角色的权限控制(RBAC)
+
+- **用户管理**:Rancher API Server 除了管理本地用户,还[管理用户用来访问外部服务所需的认证信息](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md),如登录 Active Directory 和 GitHub 所需的账号密码。
+- **授权**:Rancher API Server 可以管理[访问控制策略](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)和[安全策略](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md)。
+
+### 使用 Kubernetes 的功能
+
+- **配置 Kubernetes 集群**:Rancher API Server 可以在已有节点上[配置 Kubernetes](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md),或进行 [Kubernetes 版本升级](installation-and-upgrade/upgrade-and-roll-back-kubernetes.md)。
+- **管理应用商店**:Rancher 支持使用 [Helm Chart 应用商店](../how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md)实现轻松重复部署应用。
+- **管理项目**:项目由集群中多个命名空间和访问控制策略组成,是 Rancher 中的一个概念,Kubernetes 中并没有这个概念。你可以使用项目实现以组为单位,管理多个命名空间,并进行 Kubernetes 相关操作。Rancher UI 提供用于[项目管理](../how-to-guides/advanced-user-guides/manage-projects/manage-projects.md)和[项目内应用管理](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-resources-setup.md)的功能。
+- **Fleet 持续交付**:在 Rancher 中,你可以使用 [Fleet 持续交付](../integrations-in-rancher/fleet/fleet.md)将应用程序从 Git 仓库部署到目标下游 Kubernetes 集群,无需任何手动操作。
+- **Istio**:[Rancher 与 Istio 集成](../integrations-in-rancher/istio/istio.md),使得管理员或集群所有者可以将 Istio 交给开发者,然后开发者使用 Istio 执行安全策略,排查问题,或为蓝绿部署,金丝雀部署,和 A/B 测试进行流量管理。
+
+### 配置云基础设施
+
+- **同步节点信息**:Rancher API Server 可以同步所有集群中全部[节点](../how-to-guides/new-user-guides/manage-clusters/nodes-and-node-pools.md)的信息。
+- **配置云基础设施**:如果你为 Rancher 配置了云提供商,Rancher 可以在云端动态配置[新节点](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)和[持久化存储](../how-to-guides/new-user-guides/manage-clusters/create-kubernetes-persistent-storage/create-kubernetes-persistent-storage.md)。
+
+### 查看集群信息
+
+- **日志管理**:Rancher 可以与多种 Kubernetes 集群之外的主流日志管理工具集成。
+- **监控**:你可以使用 Rancher,通过业界领先并开源的 Prometheus 来监控集群节点、Kubernetes 组件和软件部署的状态和进程。
+- **告警**:为了保证集群和应用的正常运行,提高公司的生产效率,你需要随时了解集群和项目的计划内和非计划事件。
+
+## 使用 Rancher 编辑下游集群
+
+对于已有集群而言,可提供的选项和设置取决于你配置集群的方法。例如,只有[通过 RKE 启动](../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)的集群才有可编辑的**集群选项**。
+
+使用 Rancher 创建集群后,集群管理员可以管理集群成员,管理节点池,或进行[其他操作](../reference-guides/cluster-configuration/cluster-configuration.md)。
+
+下表总结了每一种类型的集群和对应的可编辑的选项和设置:
+
+import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md';
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md
new file mode 100644
index 00000000000..1d06135d7f4
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md
@@ -0,0 +1,6 @@
+---
+title: Rancher Prime AWS Marketplace 快速入门
+description: 使用 Amazon Marketplace 列表部署 Rancher Server。
+---
+
+你可以快速将 Rancher Prime 部署到 Amazon Elastic Kubernetes Service (EKS)。详情请参见[说明](https://suse-enceladus.github.io/marketplace-docs/rancher-prime/aws/?repository=rancher-payg-billing-adapter-llc-prd)以及我们的 [Amazon Marketplace 列表](https://aws.amazon.com/marketplace/pp/prodview-go7ent7goo5ae)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/aws.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/aws.md
new file mode 100644
index 00000000000..34590ac8931
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/aws.md
@@ -0,0 +1,95 @@
+---
+title: Rancher AWS 快速入门指南
+description: 阅读此分步 Rancher AWS 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 AWS 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 Amazon AWS 会产生费用。
+
+:::
+
+- [Amazon AWS 账号](https://aws.amazon.com/account/): 用于创建部署 Rancher Server 和 Kubernetes 的资源。
+- [Amazon AWS 访问密钥](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html):如果你没有的话,请访问此链接查看相关指南。
+- [已创建 IAM 策略](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start):定义附加此策略的账号所具有的权限。
+- [Terraform](https://www.terraform.io/downloads.html): 用于在 Amazon AWS 中配置服务器和集群。
+
+### IAM 策略示例
+
+AWS 模块只创建一个 EC2 密钥对、一个 EC2 安全组和一个 EC2 实例。以下是一个简单的策略:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "ec2:*",
+ "Resource": "*"
+ }
+ ]
+}
+```
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/aws` 命令,进入包含 Terraform 文件的 AWS 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+
+ - `aws_access_key` - 替换为 Amazon AWS 访问密钥
+ - `aws_secret_key` - 替换为 Amazon AWS Secret 密钥
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/aws) 了解更多信息。
+ 建议包括:
+
+ - `aws_region` - Amazon AWS 区域。AWS 的默认区域 (`us-east-1`) 不一定是距离你最近的区域。建议修改为距离你最近的区域。
+ - `prefix` - 所有创建资源的前缀
+ - `instance_type` - EC2 使用的实例规格,最小规格为 `t3a.medium` 。如果在预算范围内,可以使用 `t3a.large` 或 `t3a.xlarge`。
+ - `add_windows_node` - 如果设为 true,一个额外的 Windows worker 节点会添加到工作负载集群中。
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 16 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/aws` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+##### 结果
+
+两个 Kubernetes 集群已部署到你的 AWS 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+## 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/aws` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/azure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/azure.md
new file mode 100644
index 00000000000..2775340b7a5
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/azure.md
@@ -0,0 +1,81 @@
+---
+title: Rancher Azure 快速入门指南
+description: 阅读此分步 Rancher Azure 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 Azure 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 Microsoft Azure 会产生费用。
+
+:::
+
+- [Microsoft Azure 账号](https://azure.microsoft.com/en-us/free/):用于创建部署 Rancher 和 Kubernetes 的资源。
+- [Microsoft Azure 订阅](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal):如果你没有的话,请访问此链接查看如何创建 Microsoft Azure 订阅。
+- [Microsoft Azure 租户](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant):访问此链接并参考教程以创建 Microsoft Azure 租户。
+- [Microsoft Azure 客户端 ID/密文](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal):访问此链接并参考教程以创建 Microsoft Azure 客户端和密文。
+- [Terraform](https://www.terraform.io/downloads.html):用于在 Microsoft Azure 中配置服务器和集群。
+
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/azure` 命令,进入包含 Terraform 文件的 Azure 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+ - `azure_subscription_id` - 替换为 Microsoft Azure 订阅 ID。
+ - `azure_client_id` - 替换为 Microsoft Azure 客户端 ID。
+ - `azure_client_secret` - 替换为 Microsoft Azure 客户端密文。
+ - `azure_tenant_id` - 替换为 Microsoft Azure 租户 ID。
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。
+ 参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/azure) 了解更多信息。建议包括:
+ - `azure_location` - Microsoft Azure 区域。Azure 的默认区域 (`East US`) 不一定是距离你最近的区域。建议修改为距离你最近的区域。
+ - `prefix` - 所有创建资源的前缀
+ - `instance_type` - 使用的计算实例大小,最小规格为 `Standard_DS2_v2`。如果在预算范围内,可以使用 `Standard_DS2_v3` 或 `Standard_DS3_v2`。
+ - `add_windows_node` - 如果设为 true,一个额外的 Windows worker 节点会添加到工作负载集群中。
+ - `windows_admin_password` - Windows worker 节点管理员的密码
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 16 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/azure` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+#### 结果
+
+两个 Kubernetes 集群已部署到你的 Azure 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/azure` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/deploy-rancher-manager.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/deploy-rancher-manager.md
new file mode 100644
index 00000000000..5eebb87e4e8
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/deploy-rancher-manager.md
@@ -0,0 +1,20 @@
+---
+title: 部署 Rancher Server
+---
+
+你可使用以下指南之一,在你选择的提供商中部署和配置 Rancher 和 Kubernetes 集群。
+
+- [AWS](aws.md)(使用 Terraform)
+- [AWS Marketplace](aws-marketplace.md)(使用 Amazon EKS)
+- [Azure](azure.md)(使用 Terraform)
+- [DigitalOcean](digitalocean.md)(使用 Terraform)
+- [GCP](gcp.md)(使用 Terraform)
+- [Hetzner Cloud](hetzner-cloud.md)(使用 Terraform)
+- [Linode](linode.md) (使用 Terraform)
+- [Vagrant](vagrant.md)
+- [Equinix Metal](equinix-metal.md)
+- [Outscale](outscale-qs.md)(使用 Terraform)
+
+如有需要,你可以查看以下指南以了解分步步骤。如果你需要在其他提供商中或本地运行 Rancher,或者你只是想看看它是多么容易上手,你可阅读以下指南:
+
+- [手动安装](helm-cli.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md
new file mode 100644
index 00000000000..8e0280f9fa6
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md
@@ -0,0 +1,74 @@
+---
+title: Rancher DigitalOcean 快速入门指南
+description: 阅读此分步 Rancher DigitalOcean 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 DigitalOcean 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 DigitalOcean 会产生费用。
+
+:::
+
+- [DigitalOcean 账号](https://www.digitalocean.com):用于运行服务器和集群。
+- [DigitalOcean 访问密钥](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key):如果你没有的话,请访问此链接创建一个。
+- [Terraform](https://www.terraform.io/downloads.html):用于在 DigitalOcean 中配置服务器和集群。
+
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/do` 命令,进入包含 Terraform 文件的 DigitalOcean 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+ - `do_token` - 替换为 DigitalOcean 访问密钥
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。
+ 参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/do) 了解更多信息。建议包括:
+ - `do_region` - DigitalOcean 区域。DigitalOcean 的默认区域不一定是距离你最近的区域。建议修改为距离你最近的区域。
+ - `prefix` - 所有创建资源的前缀
+ - `droplet_size` - 使用的计算实例规格,最小规格为 `s-2vcpu-4gb`。如果在预算范围内,可以使用 `s-4vcpu-8gb`。
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 15 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/do` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+#### 结果
+
+两个 Kubernetes 集群已部署到你的 DigitalOcean 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/do` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md
new file mode 100644
index 00000000000..bca4519d296
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md
@@ -0,0 +1,106 @@
+---
+title: Rancher Equinix Metal 快速入门
+---
+
+## 本章节引导你:
+
+- 配置 Equinix Metal server
+- 安装 Rancher 2.x
+- 创建你的第一个集群
+- 部署一个 Nginx 应用
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。不建议将 Docker 安装用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 快速入门概述
+
+本指南划分为不同任务,以便于使用。
+
+
+
+## 先决条件
+
+- [Equinix Metal 账号](https://metal.equinix.com/developers/docs/accounts/users/)
+- [Equinix Metal 项目](https://metal.equinix.com/developers/docs/accounts/projects/)
+
+
+### 1. 配置 Equinix Metal 主机
+
+开始部署 Equinix Metal 主机。你可以使用 Equinix Metal 控制台、CLI 或 API 来配置 Equinix Metal Server。如果你需要了解每种 Deployment 类型的说明,请参见 [Equinix Metal 部署](https://metal.equinix.com/developers/docs/deploy/on-demand/)。以下链接介绍 Equinix Metal Server 的类型以及价格:
+- [Equinix Metal Server 类型](https://metal.equinix.com/developers/docs/servers/about/)
+- [Equinix Metal 价格](https://metal.equinix.com/developers/docs/servers/server-specs/)
+
+:::note 注意事项:
+
+- 如果使用 CLI 或 API 配置新的 Equinix Metal Server,你需要提供项目 ID、计划、metro 和操作系统。
+- 当使用云主机的虚拟机时,你需要允许 80 和 443 端口的入站 TCP 通信。有关端口配置的信息,请参见你的云主机的文档。
+- 如需了解所有端口要求,请参见 [Docker 安装](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md)。
+- 根据我们的[要求](../../installation-and-upgrade/installation-requirements/installation-requirements.md)配置主机。
+
+:::
+### 2. 安装 Rancher
+
+要在 Equinix Metal 主机上安装 Rancher,先与它连接,然后使用 shell 进行安装。
+
+1. 使用你惯用的 shell(例如 PuTTy 或远程终端)登录到你的 Equinix Metal 主机。
+
+2. 在 shell 中执行以下命令:
+
+ ```
+ sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged rancher/rancher
+ ```
+
+**结果**:Rancher 已安装。
+
+### 3. 登录
+
+登录到 Rancher 后,你还需要进行一些一次性配置。
+
+1. 打开 Web 浏览器并输入主机的 IP 地址 `https://<SERVER_IP>`。
+
+ 将 `<SERVER_IP>` 替换为你的主机 IP 地址。
+
+2. 出现提示时,为默认 `admin` 账号创建密码。
+
+3. 设置 **Rancher Server URL**。URL 可以是 IP 地址或主机名。需要注意,添加到集群中的每个节点都必须能够连接到此 URL。 如果你在 URL 中使用主机名,则此主机名必须在 DNS 中解析到你需要添加到集群的节点上。
+
+
+
+### 4. 创建集群
+
+欢迎使用 Rancher!现在,你可以创建你的第一个 Kubernetes 集群了。
+
+在此任务中,你可以使用**自定义**选项。此选项允许你把 _任意_ Linux 主机(云虚拟机、本地虚拟机或裸机)添加到集群中。
+
+1. 点击 **☰ > 集群管理**。
+1. 在**集群**页面,点击**创建**。
+1. 选择**自定义**。
+1. 输入**集群名称**。
+1. 点击**下一步**。
+1. 在**节点角色**中,选择 _全部_ 角色,即 **etcd**,**Control** 和 **Worker**。
+ - **可选**:Rancher 会自动检测用于 Rancher 通信和集群通信的 IP 地址。你可以使用**节点地址**处的`公有地址`和`内网地址`进行覆盖。
+1. 将注册命令复制到剪贴板。
+1. 使用你惯用的 shell(例如 PuTTy 或远程终端)登录到你的 Linux 主机。粘贴剪贴板的命令并运行。
+1. 在 Linux 主机上运行完命令后,单击**完成**。
+
+**结果**:
+
+你已创建集群,集群的状态是**配置中**。Rancher 已在你的集群中。
+
+当集群状态变为 **Active** 后,你可访问集群。
+
+**Active** 状态的集群会分配到两个项目:
+
+- `Default`:包含 `default` 命名空间
+- `System`:包含 `cattle-system`,`ingress-nginx`,`kube-public` 和 `kube-system` 命名空间。
+
+#### 已完成!
+
+恭喜!你已创建第一个集群。
+
+#### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md
new file mode 100644
index 00000000000..74af3068f04
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md
@@ -0,0 +1,77 @@
+---
+title: Rancher GCP 快速入门指南
+description: 阅读此分步 Rancher GCP 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 GCP 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 Google GCP 会产生费用。
+
+:::
+
+- [Google GCP Account](https://console.cloud.google.com/):用于创建部署 Rancher 和 Kubernetes 的资源。
+- [Google GCP 项目](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project):如果你没有的话,请访问此链接查看如何创建 GCP 项目。
+- [Google GCP ServiceAccount](https://cloud.google.com/iam/docs/creating-managing-service-account-keys):请访问此链接查看如何创建 GCP ServiceAccount 和 Token 文件。
+- [Terraform](https://www.terraform.io/downloads.html):用于在 Google GCP 中配置服务器和集群。
+
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/gcp` 命令,进入包含 Terraform 文件的 GCP 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+ - `gcp_account_json` - 替换为 GCP ServiceAccount 文件路径和文件名。
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。
+ 参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/gcp) 了解更多信息。
+ 建议包括:
+ - `gcp_region` - Google GCP 区域。GCP 的默认区域 (`us-east4`) 不一定是距离你最近的区域。建议修改为距离你最近的区域。
+ - `gcp_zone` - Google GCP 可用区。GCP 的默认可用区 (`us-east4-a`) 不一定是距离你最近的可用区。建议修改为距离你最近的可用区。
+ - `prefix` - 所有创建资源的前缀
+ - `machine_type` - 使用的计算实例大小,最小规格为 `n1-standard-1`。如果在预算范围内,可以使用 `n1-standard-2` 或 `n1-standard-4`。
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 16 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/gcp` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+#### 结果
+
+两个 Kubernetes 集群已部署到你的 GCP 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/gcp` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md
new file mode 100644
index 00000000000..b4f0395ac2d
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md
@@ -0,0 +1,152 @@
+---
+title: Helm CLI 快速入门
+---
+
+本文提供了快速安装 Rancher 的方法。
+
+这些说明假设你有一个 Linux 虚拟机,并能从本地工作站与之通信。Rancher 将安装在 Linux 主机上。你将需要检索该主机的 IP 地址,以便从本地工作站访问 Rancher。Rancher 旨在远程管理 Kubernetes 集群,因此 Rancher 管理的任何 Kubernetes 集群也都需要能够访问该 IP 地址。
+
+我们不建议在本地安装 Rancher,因为它会产生网络问题。如果你在 localhost 上安装 Rancher,Rancher 无法与下游 Kubernetes 集群通信,因此在 localhost 上你无法测试 Rancher 的集群配置和集群管理功能。
+
+你的 Linux 主机可以位于任何地方。例如,它可以是 Amazon EC2 实例、Digital Ocean Droplet 或 Azure 虚拟机。其他 Rancher 文档也经常称它们为“节点”。部署 Linux 主机的一种方法是设置一个 Amazon EC2 实例,如[本教程](../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md)中所示。
+
+完整的安装要求在[这里](../../installation-and-upgrade/installation-requirements/installation-requirements.md)。
+
+## 在 Linux 上安装 K3s
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见 [Rancher 支持矩阵](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/)。
+
+如需指定 K3s(Kubernetes)版本,在运行 K3s 安装脚本时使用 `INSTALL_K3S_VERSION` 环境变量(例如 `INSTALL_K3S_VERSION="v1.24.10+k3s1"`)。
+
+在 Linux 主机上运行以下命令来安装 K3s 集群:
+
+```
+curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=<VERSION> sh -s - server --cluster-init
+```
+
+`--cluster-init` 允许 K3s 使用嵌入式 etcd 作为数据存储,并能够转换为 HA 设置。请参阅[嵌入式数据库的高可用性](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/)。
+
+保存 Linux 主机的 IP。
+
+## 将 kubeconfig 保存到你的工作站
+
+kubeconfig 文件对于访问 Kubernetes 集群非常重要。从 Linux 主机复制 `/etc/rancher/k3s/k3s.yaml` 中的文件,并将其保存到本地工作站的 `~/.kube/config` 目录中。一种方法是使用 `scp` 工具并在本地计算机上运行此命令:
+
+
+
+
+```
+scp root@<SERVER_IP>:/etc/rancher/k3s/k3s.yaml ~/.kube/config
+```
+
+在某些情况下,它可能需要确保你的 shell 定义了环境变量 `KUBECONFIG=~/.kube/config`,例如,它可以在你的配置文件或 rc 文件中导出。
+
+
+
+
+默认情况下不能识别“scp”命令,所以我们需要先安装一个模块。
+
+在 Windows Powershell 中:
+
+```
+Find-Module Posh-SSH
+Install-Module Posh-SSH
+
+## 获取远程 kubeconfig 文件
+scp root@<SERVER_IP>:/etc/rancher/k3s/k3s.yaml $env:USERPROFILE\.kube\config
+```
+
+
+
+
+## 在 kubeconfig 中编辑 Rancher Server URL
+
+在 kubeconfig 文件中,你需要将 `server` 字段的值更改为 `https://<SERVER_IP>:6443`。你可以通过端口 6443 访问 Kubernetes API Server,通过端口 80 和 443 访问 Rancher Server。你需要进行此编辑,以便你从本地工作站运行 Helm 或 kubectl 命令时,能够与安装了 Rancher 的 Kubernetes 集群进行通信。
+
+
+
+
+打开 kubeconfig 文件进行编辑的一种方法是使用 Vim:
+
+```
+vi ~/.kube/config
+```
+
+输入 `i` 以打开 Vim 的插入模式。要保存你的工作,请按 `Esc`。然后输入 `:wq` 并按 `Enter`。
+
+
+
+
+
+在 Windows Powershell 中,你可以使用 `notepad.exe` 来编辑 kubeconfig 文件:
+
+```
+notepad.exe $env:USERPROFILE\.kube\config
+```
+
+编辑完成后,按 `ctrl+s` 或转到 `File > Save` 来保存你的工作。
+
+
+
+
+## 使用 Helm 来安装 Rancher
+
+从本地工作站运行以下命令。你需要先安装 [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) 和 [helm](https://helm.sh/docs/intro/install/):
+
+:::note
+
+要查看自定义 cert-manager 安装的选项(包括集群使用 PodSecurityPolicies 的情况),请参阅 [cert-manager 文档](https://artifacthub.io/packages/helm/cert-manager/cert-manager#configuration)。
+
+:::
+
+```
+helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+
+kubectl create namespace cattle-system
+
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/<VERSION>/cert-manager.crds.yaml
+
+helm repo add jetstack https://charts.jetstack.io
+
+helm repo update
+
+helm install cert-manager jetstack/cert-manager \
+ --namespace cert-manager \
+ --create-namespace
+
+# Windows Powershell
+helm install cert-manager jetstack/cert-manager `
+ --namespace cert-manager `
+ --create-namespace
+```
+
+安装 Rancher 的最终命令如下。该命令需要一个将流量转发到 Linux 主机的域名。为了简化本教程,你可以使用假域名。`.sslip.io` 是一个假域名的例子。
+
+要安装特定的 Rancher 版本,请使用 `--version` 标志(例如,`--version 2.6.6`)。否则,默认安装最新的 Rancher。请参阅[选择 Rancher 版本](../../installation-and-upgrade/resources/choose-a-rancher-version.md)。
+
+对于 Kubernetes v1.25 或更高版本,使用 Rancher v2.7.2-v2.7.4 时,将 `global.cattle.psp.enabled` 设置为 `false`。对于 Rancher v2.7.5 及更高版本来说,这不是必需的,但你仍然可以手动设置该选项。
+
+请注意,密码至少需要 12 个字符。
+
+```
+helm install rancher rancher-latest/rancher \
+ --namespace cattle-system \
+ --set hostname=<IP_OF_LINUX_NODE>.sslip.io \
+ --set replicas=1 \
+ --set bootstrapPassword=<PASSWORD_FOR_RANCHER_ADMIN>
+
+# Windows Powershell
+helm install rancher rancher-latest/rancher `
+ --namespace cattle-system `
+ --set hostname=<IP_OF_LINUX_NODE>.sslip.io `
+ --set replicas=1 `
+ --set bootstrapPassword=<PASSWORD_FOR_RANCHER_ADMIN>
+```
+
+现在,如果你在 Web 浏览器中导航到 `<IP_OF_LINUX_NODE>.sslip.io`,你应该会看到 Rancher UI。
+
+为了简化说明,我们使用了一个假域名和自签名证书来进行安装。因此,你可能需要在 Web 浏览器中添加一个安全例外来查看 Rancher UI。请注意,对于生产安装,你需要具有负载均衡器、真实域名和真实证书的高可用性设置。
+
+这些说明还省略了完整的安装要求和其他安装选项。如果你对这些步骤有任何疑问,请参阅完整的 [Helm CLI 安装文档](../../installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/install-upgrade-on-a-kubernetes-cluster.md)。
+
+要使用新的 Rancher Server 来启动新的 Kubernetes 集群,你可能需要在 Rancher 中设置云凭证。有关更多信息,请参阅[使用 Rancher 启动 Kubernetes 集群](../../../how-to-guides/new-user-guides/launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md
new file mode 100644
index 00000000000..16ebb833b2a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md
@@ -0,0 +1,76 @@
+---
+title: Rancher Hetzner Cloud 快速入门指南
+description: 阅读此分步 Rancher Hetzner Cloud 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 Hetzner Cloud 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 Hetzner Cloud 会产生费用。
+
+:::
+
+- [Hetzner Cloud 账号](https://www.hetzner.com):用于运行服务器和集群。
+- [Hetzner API 访问密钥](https://docs.hetzner.cloud/#getting-started):如果你没有的话,请参考说明创建一个。
+- [Terraform](https://www.terraform.io/downloads.html):用于在 Hetzner 中配置服务器和集群。
+
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/hcloud` 命令,进入包含 Terraform 文件的 Hetzner 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+ - `hcloud_token` - 替换为 Hetzner API 访问密钥。
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。
+ 参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [Hetzner Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/hcloud) 了解更多信息。
+ 建议包括:
+
+ - `prefix` - 所有创建资源的前缀
+ - `instance_type` - 实例类型,至少需要是 `cx21`。
+ - `hcloud_location` - Hetzner Cloud 位置。选择最近的位置,而不是使用默认位置(`fsn1`)。
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 15 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/hcloud` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+#### 结果
+
+两个 Kubernetes 集群已部署到你的 Hetzner 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/hcloud` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/linode.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/linode.md
new file mode 100644
index 00000000000..9b6a00f6f7e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/linode.md
@@ -0,0 +1,79 @@
+---
+title: Rancher Linode 快速入门指南
+description: 阅读此分步 Rancher Linode 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 Linode 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 Linode 会产生费用。
+
+:::
+
+- [Linode 账号](https://linode.com):用于运行服务器和集群。
+- [Linode 访问密钥](https://www.linode.com/docs/products/tools/api/guides/manage-api-tokens/):用于权限认证的 Linode 访问密钥。
+- [Terraform](https://www.terraform.io/downloads.html):用于在 Linode 中配置服务器和集群。
+
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/linode` 命令,进入包含 Terraform 文件的 Linode 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+ - `linode_token` - 上面提到的 Linode 访问密钥。
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。
+ 参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [Linode Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/linode) 了解更多信息。
+ 建议包括:
+ - `linode_region` - 创建服务器以及集群的目标 Linode 区域。
+ - 默认: `eu-central`
+ - 完整的区域列表,请参照[官方的可用区域页面](https://www.linode.com/global-infrastructure/availability/)。
+ - `prefix` - 所有创建资源的前缀
+ - `linode_type` - 所有的 Linode 资源使用的类型/计划
+ - 默认: `g6-standard-2`
+ - 完整的计划列表,请参照[官方的计划类型页面](https://www.linode.com/docs/products/compute/compute-instances/plans/)。
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 15 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/linode` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+#### 结果
+
+两个 Kubernetes 集群已部署到你的 Linode 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/linode` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md
new file mode 100644
index 00000000000..76d320a862e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md
@@ -0,0 +1,76 @@
+---
+title: Rancher Outscale 快速入门指南
+description: 阅读此分步 Rancher Outscale 指南,以快速部署带有单节点下游 Kubernetes 集群的 Rancher Server。
+---
+
+你可以参考以下步骤,在 Outscale 的单节点 K3s Kubernetes 集群中快速部署 Rancher Server,并附加一个单节点下游 Kubernetes 集群。
+
+:::note
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+:::caution
+
+部署到 Outscale 会产生费用。
+
+:::
+
+- [Outscale 账号](https://en.outscale.com/):用于运行服务器和集群。
+- [Outscale 访问密钥](https://docs.outscale.com/en/userguide/About-Access-Keys.html):如果你没有的话,请按照说明创建一个 Outscale 访问密钥。
+- [Terraform](https://www.terraform.io/downloads.html):用于在 Outscale 中配置服务器和集群。
+
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/outscale` 命令,进入包含 Terraform 文件的 Outscale 文件夹。
+
+3. 把 `terraform.tfvars.example` 文件重命名为 `terraform.tfvars`。
+
+4. 编辑 `terraform.tfvars` 文件,并替换以下变量:
+ - `access_key_id` - 替换为 Outscale 访问密钥
+ - `secret_key_id` - 替换为 Outscale 密文密钥
+ - `rancher_server_admin_password` - 替换为创建 Rancher Server 的 admin 账号的密码(最少 12 字符)
+
+5. **可选**:修改 `terraform.tfvars` 中的可选参数。
+ 参见 [Quickstart Readme](https://github.com/rancher/quickstart) 以及 [Outscale Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/outscale) 了解更多信息。
+ 建议包括:
+ - `region` - Outscale 区域。Outscale 的默认区域不一定是距离你最近的区域。建议修改为距离你最近的区域(`eu-west-2`)。
+ - `prefix` - 所有创建资源的前缀
+ - `instance_type` - 实例类型,至少需要是 `tinav3.c2r4p3`。
+
+6. 执行 `terraform init`。
+
+7. 执行 `terraform apply --auto-approve` 以初始化环境。然后,等待命令行工具返回以下信息:
+
+ ```
+ Apply complete! Resources: 21 added, 0 changed, 0 destroyed.
+
+ Outputs:
+
+ rancher_node_ip = xx.xx.xx.xx
+ rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+ workload_node_ip = yy.yy.yy.yy
+ ```
+
+8. 将以上输出中的 `rancher_server_url` 粘贴到浏览器中。在登录页面中登录(默认用户名为 `admin`,密码为在 `rancher_server_admin_password` 中设置的密码)。
+9. 使用 `quickstart/rancher/outscale` 中生成的 `id_rsa` 密钥 SSH 到 Rancher Server。
+
+#### 结果
+
+两个 Kubernetes 集群已部署到你的 Outscale 账户中,一个运行 Rancher Server,另一个为实验部署做好准备。请注意,虽然这种设置是探索 Rancher 功能的好方法,但在生产环境中,应遵循我们的高可用设置指南。用于虚拟机的 SSH 密钥是自动生成的,存储在模块目录中。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/outscale` 文件夹,然后执行 `terraform destroy --auto-approve`。
+
+2. 等待命令行界面显示资源已删除的消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/prime.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/prime.md
new file mode 100644
index 00000000000..094508ff5a6
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/prime.md
@@ -0,0 +1,7 @@
+---
+title: Rancher Prime
+---
+
+Rancher 在 v2.7 中引入了 Rancher Prime,这是 Rancher 企业级产品的进化。Rancher Prime 是基于相同源代码构建的商业化、企业级的新版本。因此,Rancher 的产品将继续保持 100% 开源,并通过安全、延长生命周期、访问重点架构和 Kubernetes 公告体现额外价值。Rancher Prime 还将提供选项,让用户获得创新 Rancher 项目的生产支持。使用 Rancher Prime,安装 asset 会托管在由 Rancher 持有和管理的可信镜像仓库中。
+
+要开始使用 Rancher Prime,请[转到此页面](https://www.rancher.com/quick-start)并填写表格。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md
new file mode 100644
index 00000000000..1622c44a7b0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md
@@ -0,0 +1,51 @@
+---
+title: Vagrant 快速入门
+---
+
+你可以参考以下步骤快速部署 Rancher Server,并附加一个单节点集群。
+
+:::caution
+
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../../installation-and-upgrade/installation-and-upgrade.md)。
+
+:::
+
+## 先决条件
+
+- [Vagrant](https://www.vagrantup.com):Vagrant 是必需的,用于根据 Vagrantfile 配置主机。
+- [Virtualbox](https://www.virtualbox.org):需要把 Vagrant 配置的虚拟机配置到 VirtualBox。
+- 至少 4GB 的可用内存。
+
+### 注意
+- Vagrant 需要使用插件来创建 VirtualBox 虚拟机。请执行以下命令进行安装:
+
+ `vagrant plugin install vagrant-vboxmanage`
+
+ `vagrant plugin install vagrant-vbguest`
+
+## 开始使用
+
+1. 使用命令行工具,执行 `git clone https://github.com/rancher/quickstart` 把 [Rancher Quickstart](https://github.com/rancher/quickstart) 克隆到本地。
+
+2. 执行 `cd quickstart/rancher/vagrant` 命令,进入包含 Vagrantfile 文件的文件夹。
+
+3. **可选**:编辑 `config.yaml` 文件:
+
+ - 根据需要更改节点数和内存分配(`node.count`, `node.cpus`, `node.memory`)
+ - 更改 `admin` 的密码以登录 Rancher。(`admin_password`)
+
+4. 执行 `vagrant up --provider=virtualbox` 以初始化环境。
+
+5. 配置完成后,在浏览器中打开 `https://192.168.56.101`。默认的用户名和密码是 `admin/adminPassword`。
+
+**结果**:Rancher Server 和你的 Kubernetes 集群已安装在 VirtualBox 上。
+
+### 后续操作
+
+使用 Rancher 创建 deployment。详情请参见[创建 Deployment](../deploy-workloads/deploy-workloads.md)。
+
+## 销毁环境
+
+1. 进入 `quickstart/rancher/vagrant` 文件夹,然后执行 `vagrant destroy -f`。
+
+2. 等待所有资源已删除的确认消息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/deploy-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/deploy-workloads.md
new file mode 100644
index 00000000000..b0aa55e4fb4
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/deploy-workloads.md
@@ -0,0 +1,12 @@
+---
+title: 部署工作负载
+---
+
+
+
+
+
+这些指南指导你完成一个应用的部署,包括如何将应用暴露在集群之外使用。
+
+- [部署带有 Ingress 的工作负载](workload-ingress.md)
+- [部署带有 NodePort 的工作负载](nodeports.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/nodeports.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/nodeports.md
new file mode 100644
index 00000000000..bf2294d4c06
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/nodeports.md
@@ -0,0 +1,138 @@
+---
+title: 部署带有 NodePort 的工作负载
+---
+
+### 先决条件
+
+你已有一个正在运行的集群,且该集群中有至少一个节点。
+
+### 1. 部署工作负载
+
+你可以开始创建你的第一个 Kubernetes [工作负载](https://kubernetes.io/docs/concepts/workloads/)。工作负载是一个对象,其中包含 pod 以及部署应用所需的其他文件和信息。
+
+在本文的工作负载中,你将部署一个 Rancher Hello-World 应用。
+
+1. 点击 **☰ > 集群管理**。
+1. 在**集群**页面中,进入需要部署工作负载的集群,然后单击 **Explore**。
+1. 点击**工作负载**。
+1. 单击**创建**。
+1. 为工作负载设置**名称**。
+1. 在**容器镜像**字段中,输入 `rancher/hello-world`。注意区分大小写。
+1. 点击**添加端口**。
+1. 在**服务类型**下拉菜单中,确保选择了 **NodePort**。
+
+ 
+
+1. 在**发布容器端口**字段中,输入端口`80`。
+
+ 
+
+1. 单击**创建**。
+
+**结果**:
+
+* 工作负载已部署。此过程可能需要几分钟。
+* 当工作负载完成部署后,它的状态会变为 **Active**。你可以从项目的**工作负载**页面查看其状态。
+
+
+
+### 2. 查看应用
+
+在**工作负载**页面中,点击工作负载下方的链接。如果 deployment 已完成,你的应用会打开。
+
+### 注意事项
+
+如果使用云虚拟机,你可能无法访问运行容器的端口。这种情况下,你可以使用 `Execute Shell` 在本地主机的 SSH 会话中测试 Nginx。如果可用的话,使用工作负载下方的链接中 `:` 后面的端口号。在本例中,端口号为 `31568`。
+
+```html
+gettingstarted@rancher:~$ curl http://localhost:31568
+
+
+
+ Rancher
+
+
+
+
+
+ Hello world!
+ My hostname is hello-world-66b4b9d88b-78bhx
+
+
k8s services found 2
+
+ INGRESS_D1E1A394F61C108633C4BD37AEDDE757 tcp://10.43.203.31:80
+
+ KUBERNETES tcp://10.43.0.1:443
+
+
+
+
+
+
+ Show request details
+
+
Request info
+ Host: 172.22.101.111:31411
+ Pod: hello-world-66b4b9d88b-78bhx
+
+ Accept: [*/*]
+
+ User-Agent: [curl/7.47.0]
+
+
+
+
+
+
+gettingstarted@rancher:~$
+
+```
+
+### 已完成!
+
+恭喜!你已成功通过 NodePort 部署工作负载。
+
+#### 后续操作
+
+使用完沙盒后,你需要清理 Rancher Server 和集群。详情请参见:
+
+- [Amazon AWS:销毁环境](../deploy-rancher-manager/aws.md#销毁环境)
+- [DigitalOcean:销毁环境](../deploy-rancher-manager/digitalocean.md#销毁环境)
+- [Vagrant:销毁环境](../deploy-rancher-manager/vagrant.md#销毁环境)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
new file mode 100644
index 00000000000..86f8017f09e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md
@@ -0,0 +1,72 @@
+---
+title: 部署带有 Ingress 的工作负载
+---
+
+### 先决条件
+
+你已有一个正在运行的集群,且该集群中有至少一个节点。
+
+### 1. 部署工作负载
+
+你可以开始创建你的第一个 Kubernetes [工作负载](https://kubernetes.io/docs/concepts/workloads/)。工作负载是一个对象,其中包含 pod 以及部署应用所需的其他文件和信息。
+
+在本文的工作负载中,你将部署一个 Rancher Hello-World 应用。
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 点击**工作负载**。
+1. 单击**创建**。
+1. 点击 **Deployment**。
+1. 为工作负载设置**名称**。
+1. 在**容器镜像**字段中,输入 `rancher/hello-world`。注意区分大小写。
+1. 在 `Service Type` 点击 **Add Port** 和 `Cluster IP`,并在 **Private Container Port** 字段中输入`80`。你可以将 `Name` 留空或指定名称。通过添加端口,你可以访问集群内外的应用。有关详细信息,请参阅 [Service](../../../pages-for-subheaders/workloads-and-pods.md#services)。
+1. 单击**创建**。
+
+**结果**:
+
+* 工作负载已部署。此过程可能需要几分钟。
+* 当工作负载完成部署后,它的状态会变为 **Active**。你可以从项目的**工作负载**页面查看其状态。
+
+### 2. 通过 Ingress 暴露应用
+
+现在应用已启动并运行,你需要暴露应用以让其他服务连接到它。
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+
+1. 点击**服务发现 > Ingresses**。
+
+1. 点击**创建**。
+
+1. 在选择**命名空间**时,你需要选择在创建 deployment 时使用的命名空间。否则,在步骤8中选择**目标服务**时,你的 deployment 会不可用。
+
+1. 输入**名称**,例如 **hello**。
+
+1. 指定**路径**,例如 `/hello`。
+
+1. 在**目标服务**字段的下拉菜单中,选择你为服务设置的名称。
+
+1. 在**端口**字段中的下拉菜单中,选择 `80`。
+
+1. 点击右下角的**创建**。
+
+**结果**:应用分配到了一个 `sslip.io` 地址并暴露。这可能需要一两分钟。
+
+
+### 查看应用
+
+在 **Deployments** 页面中,找到你 deployment 的 **endpoint** 列,然后单击一个 endpoint。可用的 endpoint 取决于你添加到 deployment 中的端口配置。如果你看不到随机分配端口的 endpoint,请将你在创建 Ingress 时指定的路径尾附到 IP 地址上。例如,如果你的 endpoint 是 `xxx.xxx.xxx.xxx` 或 `https://xxx.xxx.xxx.xxx`,把它修改为 `xxx.xxx.xxx.xxx/hello` 或 `https://xxx.xxx.xxx.xxx/hello`。
+
+应用将在另一个窗口中打开。
+
+#### 已完成!
+
+恭喜!你已成功通过 Ingress 部署工作负载。
+
+#### 后续操作
+
+使用完沙盒后,你需要清理 Rancher Server 和集群。详情请参见:
+
+- [Amazon AWS:销毁环境](../deploy-rancher-manager/aws.md#销毁环境)
+- [DigitalOcean:销毁环境](../deploy-rancher-manager/digitalocean.md#销毁环境)
+- [Vagrant:销毁环境](../deploy-rancher-manager/vagrant.md#销毁环境)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/quick-start-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/quick-start-guides.md
similarity index 58%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/quick-start-guides.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/quick-start-guides.md
index fd0013e0777..e3e750dc9b4 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/quick-start-guides.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/getting-started/quick-start-guides/quick-start-guides.md
@@ -4,7 +4,7 @@ title: Rancher 部署快速入门指南
:::caution
-本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](installation-and-upgrade.md)。
+本章节中提供的指南,旨在帮助你快速启动一个用于 Rancher 的沙盒,以评估 Rancher 是否能满足你的使用需求。快速入门指南不适用于生产环境。如果你需要获取生产环境的操作指导,请参见[安装](../installation-and-upgrade/installation-and-upgrade.md)。
:::
@@ -12,6 +12,6 @@ title: Rancher 部署快速入门指南
我们提供以下快速入门指南:
-- [部署 Rancher Server](deploy-rancher-manager.md):使用最方便的方式运行 Rancher。
+- [部署 Rancher Server](deploy-rancher-manager/deploy-rancher-manager.md):使用最方便的方式运行 Rancher。
-- [部署工作负载](deploy-rancher-workloads.md):部署一个简单的[工作负载](https://kubernetes.io/docs/concepts/workloads/)并公暴露工作负载,以从集群外部访问工作负载。
+- [部署工作负载](deploy-workloads/deploy-workloads.md):部署一个简单的[工作负载](https://kubernetes.io/docs/concepts/workloads/)并公开暴露工作负载,以从集群外部访问工作负载。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/advanced-user-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/advanced-user-guides.md
similarity index 76%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/advanced-user-guides.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/advanced-user-guides.md
index 0f8f5a7df69..ef7e395ad3c 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/advanced-user-guides.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/advanced-user-guides.md
@@ -2,6 +2,10 @@
title: 高级用户指南
---
-高级用户指南是“问题导向”的文档,用户可以从中学习如何解决问题。高级用于指南与新用户指南的主要区别在于,高级用户指南面向更有经验或更高级的用户,这些用户对文档有更多的技术需求,而且已经了解 Rancher 及其功能。他们知道自己需要做什么,只是需要额外的指导来完成更复杂的任务。
+
+
+
+
+高级用户指南是“问题导向”的文档,用户可以从中学习如何解决问题。高级用户指南与新用户指南的主要区别在于,高级用户指南面向更有经验或更高级的用户,这些用户对文档有更多的技术需求,而且已经了解 Rancher 及其功能。他们知道自己需要做什么,只是需要额外的指导来完成更复杂的任务。
应该注意的是,新用户指南和高级用户指南都没有提供详细的解释或讨论(这些文档不包括在本部分)。操作指南侧重于引导用户通过可重复、有效的步骤来学习新技能、掌握某些操作或解决某些问题。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md
new file mode 100644
index 00000000000..ff6b6200e19
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/cis-scan-guides.md
@@ -0,0 +1,17 @@
+---
+title: CIS 扫描指南
+---
+
+
+
+
+
+- [安装 Rancher CIS Benchmark](install-rancher-cis-benchmark.md)
+- [卸载 Rancher CIS Benchmark](uninstall-rancher-cis-benchmark.md)
+- [运行扫描](run-a-scan.md)
+- [定时运行扫描](run-a-scan-periodically-on-a-schedule.md)
+- [跳过测试](skip-tests.md)
+- [查看报告](view-reports.md)
+- [为 Rancher CIS Benchmark 启用告警](enable-alerting-for-rancher-cis-benchmark.md)
+- [为定时扫描配置告警](configure-alerts-for-periodic-scan-on-a-schedule.md)
+- [为集群扫描创建自定义 Benchmark 版本](create-a-custom-benchmark-version-to-run.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md
new file mode 100644
index 00000000000..643a7122e37
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md
@@ -0,0 +1,40 @@
+---
+title: 为定时扫描配置告警
+---
+
+你可以定时运行 ClusterScan。
+
+你还可以为定时扫描指定是否在扫描完成时发出告警。
+
+只有定时运行的扫描才支持告警。
+
+CIS Benchmark 应用支持两种类型的告警:
+
+- 扫描完成告警:此告警在扫描运行完成时发出。告警包括详细信息,包括 ClusterScan 的名称和 ClusterScanProfile 的名称。
+- 扫描失败告警:如果扫描中有一些测试失败或扫描处于 `Fail` 状态,则会发出此告警。
+
+:::note 先决条件:
+
+为 `rancher-cis-benchmark` 启用告警之前,确保安装了 `rancher-monitoring` 应用并配置了接收器(Receiver)和路由(Route)。详情请参见[本章节](../../../reference-guides/monitoring-v2-configuration/receivers.md)。
+
+在为 `rancher-cis-benchmark` 告警配置路由时,你可以使用键值对 `job:rancher-cis-scan` 来指定匹配。详情请查看[路由配置示例](../../../reference-guides/monitoring-v2-configuration/receivers.md#cis-扫描告警的示例路由配置)。
+
+:::
+
+要为定时运行的扫描配置告警:
+
+1. 请在 `rancher-cis-benchmark` 应用程序上启用告警。详情请参见[本页](../../../how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md)。
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要运行 CIS 扫描的集群,然后单击 **Explore**。
+1. 点击 **CIS Benchmark > 扫描**。
+1. 单击**创建**。
+1. 选择集群扫描配置文件。该配置文件确定要使用哪个 CIS Benchmark 版本以及要执行哪些测试。如果你选择 Default 配置文件,则 CIS Operator 将选择适用于它所在的 Kubernetes 集群类型的配置文件。
+1. 选择**定时运行扫描**的选项。
+1. 在**调度**字段中输入有效的 [Cron 表达式](https://en.wikipedia.org/wiki/Cron#CRON_expression)。
+1. 选中**告警**下告警类型旁边的框。
+1. (可选)选择一个**保留计数**,表示为这个定时扫描维护的报告数量。默认情况下,此计数为 3。超过此保留限制时,旧报告将被删除。
+1. 单击**创建**。
+
+**结果**:扫描运行,并根据设置的 cron 表达式重新调度。如果在 `rancher-monitoring` 应用下配置了路由和接收器,则会在扫描完成时发出告警。
+
+每次运行扫描都会生成一份带有扫描结果的报告。如需查看最新的结果,请单击显示的扫描名称。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md
new file mode 100644
index 00000000000..fe8477f7b55
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md
@@ -0,0 +1,9 @@
+---
+title: 为集群扫描创建自定义 Benchmark 版本
+---
+
+某些 Kubernetes 集群可能需要自定义配置 Benchmark 测试。例如,Kubernetes 配置文件或证书的路径可能与上游 CIS Benchmark 的标准位置不同。
+
+现在,你可以使用 `rancher-cis-benchmark` 应用来创建自定义 Benchmark 版本,从而运行集群扫描。
+
+有关详细信息,请参阅[此页面](../../../integrations-in-rancher/cis-scans/custom-benchmark.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md
new file mode 100644
index 00000000000..55c34dcdf5c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md
@@ -0,0 +1,20 @@
+---
+title: 为 Rancher CIS Benchmark 启用告警
+---
+
+你可以配置告警,从而在定时运行的扫描完成时发送告警。
+
+:::note 先决条件:
+
+为 `rancher-cis-benchmark` 启用告警之前,确保安装了 `rancher-monitoring` 应用并配置了接收器(Receiver)和路由(Route)。详情请参见[本章节](../../../reference-guides/monitoring-v2-configuration/receivers.md)。
+
+在为 `rancher-cis-benchmark` 告警配置路由时,你可以使用键值对 `job:rancher-cis-scan` 来指定匹配。详情请查看[路由配置示例](../../../reference-guides/monitoring-v2-configuration/receivers.md#cis-扫描告警的示例路由配置)。
+
+:::
+
+在安装或升级 `rancher-cis-benchmark` Helm Chart 时,在 `values.yaml` 中将以下标志设置为 `true`:
+
+```yaml
+alerts:
+ enabled: true
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md
new file mode 100644
index 00000000000..814d38c5349
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md
@@ -0,0 +1,17 @@
+---
+title: 安装 Rancher CIS Benchmark
+---
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要安装 CIS Benchmark 的集群,然后单击 **Explore**。
+1. 在左侧导航栏中,单击 **Apps > Charts**。
+1. 单击 **CIS Benchmark**。
+1. 单击**安装**。
+
+**结果**:CIS 扫描应用已经部署在 Kubernetes 集群上。
+
+:::note
+
+如果你使用 Kubernetes v1.24 或更早版本,并且具有使用 [Pod 安全策略](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) (PSP) 加固的集群,则 CIS Benchmark 4.0.0 及更高版本会默认禁用 PSP。要在 PSP 加固集群上安装 CIS Benchmark,请在安装 Chart 之前将 values 中的 `global.psp.enabled` 设置为 `true`。[Pod 安全准入](../../new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md) (PSA) 加固集群不受影响。
+
+:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md
new file mode 100644
index 00000000000..d43567fbb25
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md
@@ -0,0 +1,20 @@
+---
+title: 定时运行扫描
+---
+
+要定时运行 ClusterScan:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要运行 CIS 扫描的集群,然后单击 **Explore**。
+1. 点击 **CIS Benchmark > 扫描**。
+1. 选择集群扫描配置文件。该配置文件确定要使用哪个 CIS Benchmark 版本以及要执行哪些测试。如果你选择 Default 配置文件,则 CIS Operator 将选择适用于它所在的 Kubernetes 集群类型的配置文件。
+1. 选择**定时运行扫描**的选项。
+1. 在**调度**字段中输入有效的 [Cron 表达式](https://en.wikipedia.org/wiki/Cron#CRON_expression)。
+1. 选择一个**保留计数**,表示为这个定时扫描维护的报告数量。默认情况下,此计数为 3。超过此保留限制时,旧报告将被删除。
+1. 单击**创建**。
+
+**结果**:扫描运行,并根据设置的 cron 表达式重新调度。**下一次扫描**的值表示下次运行此扫描的时间。
+
+每次运行扫描都会生成一份带有扫描结果的报告。如需查看最新的结果,请单击显示的扫描名称。
+
+你还可以在扫描详情页面上的**报告**下拉菜单中查看之前的报告。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md
new file mode 100644
index 00000000000..e658029233e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md
@@ -0,0 +1,22 @@
+---
+title: 运行扫描
+---
+
+创建 ClusterScan 自定义资源后,它会在集群上为所选 ClusterScanProfile 启动新的 CIS 扫描。
+
+:::note
+
+请注意,目前一个集群每次只能运行一个 CIS 扫描。如果你创建了多个 ClusterScan 自定义资源,operator 只能一个接一个地运行这些资源。一个扫描完成之前,其余 ClusterScan 自定义资源将处于 “Pending” 状态。
+
+:::
+
+要运行扫描:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要运行 CIS 扫描的集群,然后单击 **Explore**。
+1. 点击 **CIS Benchmark > 扫描**。
+1. 单击**创建**。
+1. 选择集群扫描配置文件。该配置文件确定要使用哪个 CIS Benchmark 版本以及要执行哪些测试。如果你选择 Default 配置文件,则 CIS Operator 将选择适用于它所在的 Kubernetes 集群类型的配置文件。
+1. 单击**创建**。
+
+**结果**:已生成带有扫描结果的报告。如需查看结果,请单击显示的扫描名称。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md
new file mode 100644
index 00000000000..230bb1c7696
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md
@@ -0,0 +1,34 @@
+---
+title: 跳过测试
+---
+
+用户可以在测试配置文件中自定义要跳过的测试,然后 CIS 扫描可以使用该配置文件运行。
+
+要跳过测试,你需要创建一个自定义的 CIS 扫描配置文件。配置文件包含 CIS 扫描的配置,包括要使用的 Benchmark 测试版本以及要在该 Benchmark 测试中跳过的测试。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要运行 CIS 扫描的集群,然后单击 **Explore**。
+1. 单击 **CIS Benchmark > 配置文件**。
+1. 在这里,你可以使用多种方式来创建配置文件。要创建新配置文件,单击**创建**并在 UI 中填写表单。要基于现有配置文件来创建新配置文件,请转到现有配置文件并单击**⋮ 克隆**。如果你在填写表单,请使用测试 ID 添加要跳过的测试,并参考相关的 CIS Benchmark。如果你将新的测试配置文件创建为 YAML,你需要在 `skipTests` 参数中添加要跳过的测试的 ID。你还需要为配置文件命名:
+
+ ```yaml
+ apiVersion: cis.cattle.io/v1
+ kind: ClusterScanProfile
+ metadata:
+ annotations:
+ meta.helm.sh/release-name: clusterscan-operator
+ meta.helm.sh/release-namespace: cis-operator-system
+ labels:
+ app.kubernetes.io/managed-by: Helm
+ name: ""
+ spec:
+ benchmarkVersion: cis-1.5
+ skipTests:
+ - "1.1.20"
+ - "1.1.21"
+ ```
+1. 单击**创建**。
+
+**结果**:已创建一个新的 CIS 扫描配置文件。
+
+使用此配置文件[运行扫描](./run-a-scan.md)时,会跳过定义的跳过测试。跳过的测试将在生成的报告中标记为 `Skip`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md
new file mode 100644
index 00000000000..83d36169627
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md
@@ -0,0 +1,9 @@
+---
+title: 卸载 Rancher CIS Benchmark
+---
+
+1. 在**集群**仪表板中,单击左侧导航的 **Apps > Installed Apps**。
+1. 前往 `cis-operator-system` 命名空间,并选中 `rancher-cis-benchmark-crd` 和 `rancher-cis-benchmark` 旁边的框。
+1. 单击**删除**并确认**删除**。
+
+**结果**:已卸载 `rancher-cis-benchmark` 应用。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md
new file mode 100644
index 00000000000..adfcce53799
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md
@@ -0,0 +1,12 @@
+---
+title: 查看报告
+---
+
+要查看生成的 CIS 扫描报告:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要运行 CIS 扫描的集群,然后单击 **Explore**。
+1. 点击 **CIS Benchmark > 扫描**。
+1. **扫描**页面将显示生成的报告。要查看详细报告,请转到扫描报告并单击报告名称。
+
+你可以从扫描列表或扫描详情页面下载报告。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
new file mode 100644
index 00000000000..bb7cf785839
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/configure-layer-7-nginx-load-balancer.md
@@ -0,0 +1,260 @@
+---
+title: 7 层 NGINX 负载均衡器上的 TLS 终止(Docker 安装)
+---
+
+如果你的开发或测试环境要求在负载均衡器上终止 TLS/SSL,而不是在 Rancher Server 上,请部署 Rancher 并配置负载均衡器。
+
+如果要在基础设施中对 TLS 集中进行终止,请使用 7 层负载均衡器。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。
+
+本文中的安装步骤将引导你使用单个容器部署 Rancher,并提供 7 层 NGINX 负载均衡器的示例配置。
+
+## 操作系统,Docker,硬件和网络要求
+
+请确保你的节点满足常规的[安装要求](../../pages-for-subheaders/installation-requirements.md)。
+
+## 安装概要
+
+
+## 1. 配置 Linux 主机
+
+根据我们的[要求](../../pages-for-subheaders/installation-requirements.md)配置一个 Linux 主机来启动 Rancher Server。
+
+## 2. 选择一个 SSL 选项并安装 Rancher
+
+出于安全考虑,使用 Rancher 时请使用 SSL(Secure Sockets Layer)。SSL 保护所有 Rancher 网络通信(如登录和与集群交互)的安全。
+
+:::note 你是否需要:
+
+- 完成离线安装。
+- 记录所有 Rancher API 的事务。
+
+继续之前,请参见[高级选项](#高级选项)。
+
+:::
+
+选择以下的选项之一:
+
+
+ 选项 A:使用你自己的证书 - 自签名
+
+如果要使用自签名证书来加密通信,你必须在负载均衡器(后续步骤)和 Rancher 容器上安装证书。运行 Docker 命令部署 Rancher,将 Docker 指向你的证书。
+
+:::note 先决条件:
+
+创建自签名证书。
+
+- 证书文件的格式必须是 PEM。
+
+:::
+
+**使用自签名证书安装 Rancher**:
+
+1. 在运行 Docker 命令部署 Rancher 时,将 Docker 指向你的 CA 证书文件。
+
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \
+ rancher/rancher:latest
+ ```
+
+
+
+ 选项 B:使用你自己的证书 - 可信 CA 签名的证书
+
+如果你的集群面向公众,则最好使用由公认 CA 签署的证书。
+
+:::note 先决条件:
+
+- 证书文件的格式必须是 PEM。
+
+:::
+
+**使用授信 CA 签发的证书安装 Rancher**:
+
+如果你使用授信 CA 签发的证书,你无需在 Rancher 容器中安装证书。但是,请确保不要生成和存储默认的 CA 证书(你可以通过将 `--no-cacerts` 参数传递给容器来实现)。
+
+1. 输入以下命令:
+
+ ```
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ rancher/rancher:latest --no-cacerts
+ ```
+
+
+
+## 3. 配置负载均衡器
+
+在 Rancher 容器前使用负载均衡器时,容器无需从端口 80 或端口 443 重定向端口通信。你可以通过传递 `X-Forwarded-Proto: https` 标头禁用此重定向。
+
+负载均衡器或代理必须支持以下内容:
+
+- **WebSocket** 连接
+- **SPDY** / **HTTP/2** 协议
+- 传递/设置以下标头:
+
+ | 标头 | 值 | 描述 |
+ |--------|-------|-------------|
+ | `Host` | 用于访问 Rancher 的主机名。 | 识别客户端所请求的服务器。 |
+ | `X-Forwarded-Proto` | `https` | 识别客户端连接负载均衡器或代理时所用的协议。 **注意**:如果此标头存在,`rancher/rancher` 不会将 HTTP 重定向到 HTTPS。 |
+ | `X-Forwarded-Port` | 用于访问 Rancher 的端口。 | 识别客户端连接到负载均衡器或代理时所用的端口。 |
+ | `X-Forwarded-For` | 客户端 IP 地址 | 识别客户端的原始 IP 地址。 |
+### 示例 NGINX 配置
+
+此 NGINX 配置已在 NGINX 1.14 上进行了测试。
+
+:::note
+
+此 NGINX 配置只是一个示例,可能不适合你的环境。如需查阅完整文档,请参见 [NGINX 负载均衡 - HTTP 负载均衡](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/)。
+
+:::
+
+- 将 `rancher-server` 替换为运行 Rancher 容器的节点的 IP 或主机名。
+- 将两处的 `FQDN` 均替换为 Rancher 的 DNS 名称。
+- 把 `/certs/fullchain.pem` 和 `/certs/privkey.pem` 分别替换为服务器证书和服务器证书密钥的位置。
+
+```
+worker_processes 4;
+worker_rlimit_nofile 40000;
+
+events {
+ worker_connections 8192;
+}
+
+http {
+ upstream rancher {
+ server rancher-server:80;
+ }
+
+ map $http_upgrade $connection_upgrade {
+ default Upgrade;
+ '' close;
+ }
+
+ server {
+ listen 443 ssl http2;
+ server_name FQDN;
+ ssl_certificate /certs/fullchain.pem;
+ ssl_certificate_key /certs/privkey.pem;
+
+ location / {
+ proxy_set_header Host $host;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Port $server_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_pass http://rancher;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ # 此项允许执行的 shell 窗口保持开启,最长可达15分钟。不使用此参数的话,默认1分钟后自动关闭。
+ proxy_read_timeout 900s;
+ proxy_buffering off;
+ }
+ }
+
+ server {
+ listen 80;
+ server_name FQDN;
+ return 301 https://$server_name$request_uri;
+ }
+}
+```
+
+
+
+## 后续操作
+
+- **推荐**:检查单节点[备份](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)和[恢复](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)。你可能暂时没有需要备份的数据,但是我们建议你在常规使用 Rancher 后创建备份。
+- 创建 Kubernetes 集群:[配置 Kubernetes 集群](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md)。
+
+
+
+## 常见问题和故障排除
+
+如果你需要对证书进行故障排除,请参见[此章节](../../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)。
+
+## 高级选项
+
+### API 审计
+
+如果你需要记录所有 Rancher API 事务,请将以下标志添加到安装命令中,从而启用 [API 审计](enable-api-audit-log.md)功能。
+
+ -e AUDIT_LEVEL=1 \
+ -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
+ -e AUDIT_LOG_MAXAGE=20 \
+ -e AUDIT_LOG_MAXBACKUP=20 \
+ -e AUDIT_LOG_MAXSIZE=100 \
+
+### 离线环境
+
+如果你访问此页面是为了完成[离线安装](../../pages-for-subheaders/air-gapped-helm-cli-install.md),则在运行安装命令时,先将你的私有镜像仓库 URL 附加到 Server 标志中。也就是说,在 `rancher/rancher:latest` 前面添加 `<REGISTRY.YOURDOMAIN.COM:PORT>/`(你的私有镜像仓库 URL)。
+
+**示例**:
+
+    <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:latest
+
+### 持久化数据
+
+Rancher 使用 etcd 作为数据存储。如果 Rancher 是使用 Docker 安装的,Rancher 会使用嵌入式 etcd。持久化数据位于容器的 `/var/lib/rancher` 路径中。
+
+你可以将主机卷挂载到该位置,来将数据保留在运行它的主机上:
+
+```
+docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /opt/rancher:/var/lib/rancher \
+ --privileged \
+ rancher/rancher:latest
+```
+
+此操作需要 [privileged 访问](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#rancher-特权访问)。
+
+这个 7 层 NGINX 配置已经在 NGINX 1.13(Mainline)和 1.14(Stable)版本上进行了测试。
+
+:::note
+
+此 NGINX 配置只是一个示例,可能不适合你的环境。如果需要查阅完整文档,请参见 [NGINX 负载均衡 - TCP 和 UDP 负载均衡器](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/)。
+
+:::
+
+```
+upstream rancher {
+ server rancher-server:80;
+}
+
+map $http_upgrade $connection_upgrade {
+ default Upgrade;
+ '' close;
+}
+
+server {
+ listen 443 ssl http2;
+ server_name rancher.yourdomain.com;
+ ssl_certificate /etc/your_certificate_directory/fullchain.pem;
+ ssl_certificate_key /etc/your_certificate_directory/privkey.pem;
+
+ location / {
+ proxy_set_header Host $host;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-Port $server_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_pass http://rancher;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ # 此项允许执行的 shell 窗口保持开启,最长可达15分钟。不使用此参数的话,默认1分钟后自动关闭。
+ proxy_read_timeout 900s;
+ proxy_buffering off;
+ }
+}
+
+server {
+ listen 80;
+ server_name rancher.yourdomain.com;
+ return 301 https://$server_name$request_uri;
+}
+```
+
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md
new file mode 100644
index 00000000000..747f302b79c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-api-audit-log-in-downstream-clusters.md
@@ -0,0 +1,247 @@
+---
+title: 下游集群开启 API 审计日志
+---
+
+
+
+
+
+Kubernetes 审计提供了由 Kube-apiserver 执行的与安全相关的、按时间顺序排列的集群审计记录。Kube API 会在请求执行的每个阶段都生成一个事件,然后根据策略进行预处理并保存,审计策略配置了要记录的内容。
+
+作为遵守互联网安全中心 (CIS) Kubernetes 基准测试控制要求的一部分,你可能需要配置审计日志。
+
+有关配置的详细信息,请参阅 [Kubernetes 官方文档](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/)。
+
+
+
+
+### 方法1(建议):设置 `machineGlobalConfig` 字段的 `audit-policy-file`
+
+你可以在配置文件中设置 `audit-policy-file`,Rancher 会将该文件保存在 Control Plane 节点的 `/var/lib/rancher/rke2/etc/config-files/audit-policy-file` 目录中,并在 RKE2 服务器中配置相应的选项。
+
+例子:
+```yaml
+apiVersion: provisioning.cattle.io/v1
+kind: Cluster
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ audit-policy-file: |
+ apiVersion: audit.k8s.io/v1
+ kind: Policy
+ rules:
+ - level: RequestResponse
+ resources:
+ - group: ""
+ resources:
+ - pods
+```
+
+### 方法2:直接使用 `machineSelectorFiles` 和 `machineGlobalConfig` 配置
+
+:::note
+
+Rancher v2.7.2 及以上版本提供此功能。
+
+:::
+
+你可以使用 `machineSelectorFiles` 将审计策略文件传递到 Control Plane 节点,并使用 `machineGlobalConfig` 设置 kube-apiserver 的选项。
+
+在此之前,你需要创建 [Secret](../new-user-guides/kubernetes-resources-setup/secrets.md) 或 [ConfigMap](../new-user-guides/kubernetes-resources-setup/configmaps.md) 作为审计策略的来源。
+
+Secret 或 ConfigMap 必须满足以下要求:
+
+1. 必须位于 Cluster 对象所在的 `fleet-default` 命名空间中。
+2. 它必须含有 Annotation `rke.cattle.io/object-authorized-for-clusters: <cluster-name1>,<cluster-name2>`,以允许目标集群使用它。
+
+:::tip
+
+Rancher Dashboard 提供了易用的表单页面用于创建 Secret 或 ConfigMap。
+
+:::
+
+例子:
+
+```yaml
+apiVersion: v1
+data:
+ audit-policy: >-
+ IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE=
+kind: Secret
+metadata:
+ annotations:
+ rke.cattle.io/object-authorized-for-clusters: cluster1
+  name: dev-audit-policy
+ namespace: fleet-default
+```
+
+可以通过编辑集群 YAML 的 `machineSelectorFiles` 和 `machineGlobalConfig` 字段来启用和配置审计日志。
+
+例子:
+
+```yaml
+apiVersion: provisioning.cattle.io/v1
+kind: Cluster
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ kube-apiserver-arg:
+ - audit-policy-file=/dev-audit-policy.yaml
+ - audit-log-path=/dev-audit.logs
+ machineSelectorFiles:
+ - fileSources:
+ - configMap:
+ name: ''
+ secret:
+ items:
+ - key: audit-policy
+ path: /dev-audit-policy.yaml
+ name: dev-audit-policy
+ machineLabelSelector:
+ matchLabels:
+ rke.cattle.io/control-plane-role: 'true'
+```
+
+:::tip
+
+你还可以使用指令 `machineSelectorConfig` 和适当的 `machineLabelSelectors` 来达到相同的效果。
+
+:::
+
+有关集群配置的更多信息,请参阅 [RKE2 集群配置参考](../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md)页面。
+
+
+
+
+
+:::note
+
+Rancher v2.7.2 及以上版本提供此功能。
+
+:::
+
+你可以使用 `machineSelectorFiles` 将审计策略文件传递到 Control Plane 节点,并使用 `machineGlobalConfig` 设置 kube-apiserver 的选项。
+
+在此之前,你需要创建 [Secret](../new-user-guides/kubernetes-resources-setup/secrets.md) 或 [ConfigMap](../new-user-guides/kubernetes-resources-setup/configmaps.md) 作为审计策略的来源。
+
+Secret 或 ConfigMap 必须满足以下要求:
+
+1. 必须位于 Cluster 对象所在的 `fleet-default` 命名空间中。
+2. 它必须含有 Annotation `rke.cattle.io/object-authorized-for-clusters: <cluster-name1>,<cluster-name2>`,以允许目标集群使用它。
+
+:::tip
+
+Rancher Dashboard 提供了易于使用的表单页面用于创建 [Secret](../new-user-guides/kubernetes-resources-setup/secrets.md) 或 [ConfigMap](../new-user-guides/kubernetes-resources-setup/configmaps.md)。
+
+:::
+
+例子:
+
+```yaml
+apiVersion: v1
+data:
+ audit-policy: >-
+ IyBMb2cgYWxsIHJlcXVlc3RzIGF0IHRoZSBNZXRhZGF0YSBsZXZlbC4KYXBpVmVyc2lvbjogYXVkaXQuazhzLmlvL3YxCmtpbmQ6IFBvbGljeQpydWxlczoKLSBsZXZlbDogTWV0YWRhdGE=
+kind: Secret
+metadata:
+ annotations:
+ rke.cattle.io/object-authorized-for-clusters: cluster1
+  name: dev-audit-policy
+ namespace: fleet-default
+```
+
+可以通过编辑集群 YAML 的 `machineSelectorFiles` 和 `machineGlobalConfig` 字段来启用和配置审计日志。
+
+例子:
+
+```yaml
+apiVersion: provisioning.cattle.io/v1
+kind: Cluster
+spec:
+ rkeConfig:
+ machineGlobalConfig:
+ kube-apiserver-arg:
+ - audit-policy-file=/dev-audit-policy.yaml
+ - audit-log-path=/dev-audit.logs
+ machineSelectorFiles:
+ - fileSources:
+ - configMap:
+ name: ''
+ secret:
+ items:
+ - key: audit-policy
+ path: /dev-audit-policy.yaml
+ name: dev-audit-policy
+ machineLabelSelector:
+ matchLabels:
+ rke.cattle.io/control-plane-role: 'true'
+```
+
+:::tip
+
+你还可以使用指令 `machineSelectorConfig` 和适当的 `machineLabelSelectors` 来达到相同的效果。
+
+:::
+
+有关集群配置的更多信息,请参阅 [K3s 集群配置参考](../../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md) 页面。
+
+
+
+
+
+可通过编辑集群 YAML 来启用和配置审计日志。
+
+在启用审计日志后,将使用 RKE1 的默认值。
+
+```yaml
+#
+# Rancher Config
+#
+rancher_kubernetes_engine_config:
+ services:
+ kube-api:
+ audit_log:
+ enabled: true
+```
+
+你还可以自定义审计日志配置。
+
+```yaml
+#
+# Rancher Config
+#
+rancher_kubernetes_engine_config:
+ services:
+ kube-api:
+ audit_log:
+ enabled: true
+ configuration:
+ max_age: 6
+ max_backup: 6
+ max_size: 110
+ path: /var/log/kube-audit/audit-log.json
+ format: json
+ policy:
+ apiVersion: audit.k8s.io/v1 # 这里必须填写
+ kind: Policy
+ omitStages:
+ - "RequestReceived"
+ rules:
+ # Log pod changes at RequestResponse level
+ - level: RequestResponse
+ resources:
+ - group: ""
+ # Resource "pods" doesn't match requests to any subresource of pods,
+ # which is consistent with the RBAC policy.
+ resources: ["pods"]
+ # Log "pods/log", "pods/status" at Metadata level
+ - level: Metadata
+ resources:
+ - group: ""
+ resources: ["pods/log", "pods/status"]
+```
+
+配置详情请参考 [RKE1 官方文档](https://rke.docs.rancher.com/config-options/audit-log)。
+
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-api-audit-log.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-api-audit-log.md
new file mode 100644
index 00000000000..94974cb52f7
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-api-audit-log.md
@@ -0,0 +1,558 @@
+---
+title: 启用 API 审计日志以记录系统事件
+---
+
+你可以启用 API 审计日志来记录各个用户发起的系统事件的顺序。通过查看日志,你可以了解发生了什么事件、事件发生的时间,事件发起人,以及事件影响的集群。启用此功能后,所有 Rancher API 的请求和响应都会写入日志中。
+
+API 审计可以在 Rancher 安装或升级期间启用。
+
+## 启用 API 审计日志
+
+你可以将环境变量传递给 Rancher Server 容器,从而启用和配置审计日志。请参见以下文档,在安装时启用该功能:
+
+- [Docker 安装](../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-审计日志)
+
+- [Kubernetes 安装](../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#api-审计日志)
+
+## API 审计日志选项
+
+以下参数定义了审计日志的记录规则,其中包括应该记录什么内容以及包括什么数据:
+
+| 参数 | 描述 |
+| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `AUDIT_LEVEL` | `0` - 禁用审计日志(默认) `1` - 日志事件元数据 `2` - 日志事件元数据和请求体 `3` - 日志事件元数据,请求体和响应体。请求/响应对的每个日志事务都使用同一个的 `auditID`。 如需了解每个设置记录的日志内容,请参见[审计日志级别](#审核日志级别)。 |
+| `AUDIT_LOG_PATH` | Rancher Server API 的日志路径。默认路径:`/var/log/auditlog/rancher-api-audit.log`。你可以将日志目录挂载到主机。 示例:`AUDIT_LOG_PATH=/my/custom/path/` |
+| `AUDIT_LOG_MAXAGE` | 旧审计日志文件可保留的最大天数。默认为 10 天。 |
+| `AUDIT_LOG_MAXBACKUP` | 保留的审计日志最大文件个数。默认值为 10。 |
+| `AUDIT_LOG_MAXSIZE` | 在审计日志文件被轮换前的最大容量,单位是 MB。默认大小为 100MB。 |
+
+
+
+### 审核日志级别
+
+下表介绍了每个 [`AUDIT_LEVEL`](#audit-level) 记录的 API 事务:
+
+| `AUDIT_LEVEL` 设置 | 请求元数据 | 请求体 | 响应元数据 | 响应体 |
+| --------------------- | ---------------- | ------------ | ----------------- | ------------- |
+| `0` | | | | |
+| `1` | ✓ | | | |
+| `2` | ✓ | ✓ | | |
+| `3` | ✓ | ✓ | ✓ | ✓ |
+
+## 查看 API 审计日志
+
+### Docker 安装
+
+与主机系统共享 `AUDIT_LOG_PATH` 目录(默认目录:`/var/log/auditlog`)。日志可以通过标准 CLI 工具进行解析,也可以转发到 Fluentd、Filebeat、Logstash 等日志收集工具。
+
+### Kubernetes 安装
+
+使用 Helm Chart 安装 Rancher 时启用 API 审计日志,会在 Rancher Pod 中创建一个 `rancher-audit-log` Sidecar 容器。该容器会将日志发送到标准输出 (stdout)。你可以像查看其他容器的日志一样查看 API 审计日志。
+
+`rancher-audit-log` 容器位于 `cattle-system` 命名空间中的 `rancher` Pod 中。
+
+#### CLI
+
+```bash
+kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log
+```
+
+#### 发送审计日志
+
+你可以为集群启用 Rancher 的内置日志收集和传送功能,将审计日志和其他服务日志发送到支持的 endpoint。详情请参见 [Rancher 工具 - Logging](../../pages-for-subheaders/logging.md)。
+
+## 审计日志示例
+
+启用审计日志后,Rancher 会以 JSON 格式记录每个 API 的请求和响应。下文的代码示例展示了如何查看 API 事务。
+
+### 元数据日志级别
+
+如果你将 `AUDIT_LEVEL` 设置为 `1`,Rancher 只会记录每个 API 请求的元数据标头,而不会记录请求体。标头记录了 API 事务的基本信息,包括 ID、发起人、发起时间等。代码示例如下:
+
+```json
+{
+ "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183",
+ "requestURI": "/v3/schemas",
+ "sourceIPs": ["::1"],
+ "user": {
+ "name": "user-f4tt2",
+ "group": ["system:authenticated"]
+ },
+ "verb": "GET",
+ "stage": "RequestReceived",
+ "stageTimestamp": "2018-07-20 10:22:43 +0800"
+}
+```
+
+### 元数据和请求体日志级别
+
+如果你将 `AUDIT_LEVEL` 设置为 `2`,Rancher 会记录每个 API 请求的元数据标头和请求体。
+
+下面的代码示例描述了一个 API 请求,包括它的元数据标头和正文:
+
+```json
+{
+ "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb",
+ "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx",
+ "sourceIPs": ["::1"],
+ "user": {
+ "name": "user-f4tt2",
+ "group": ["system:authenticated"]
+ },
+ "verb": "PUT",
+ "stage": "RequestReceived",
+ "stageTimestamp": "2018-07-20 10:28:08 +0800",
+ "requestBody": {
+ "hostIPC": false,
+ "hostNetwork": false,
+ "hostPID": false,
+ "paused": false,
+ "annotations": {},
+ "baseType": "workload",
+ "containers": [
+ {
+ "allowPrivilegeEscalation": false,
+ "image": "nginx",
+ "imagePullPolicy": "Always",
+ "initContainer": false,
+ "name": "nginx",
+ "ports": [
+ {
+ "containerPort": 80,
+ "dnsName": "nginx-nodeport",
+ "kind": "NodePort",
+ "name": "80tcp01",
+ "protocol": "TCP",
+ "sourcePort": 0,
+ "type": "/v3/project/schemas/containerPort"
+ }
+ ],
+ "privileged": false,
+ "readOnly": false,
+ "resources": {
+ "type": "/v3/project/schemas/resourceRequirements",
+ "requests": {},
+ "limits": {}
+ },
+ "restartCount": 0,
+ "runAsNonRoot": false,
+ "stdin": true,
+ "stdinOnce": false,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "type": "/v3/project/schemas/container",
+ "environmentFrom": [],
+ "capAdd": [],
+ "capDrop": [],
+ "livenessProbe": null,
+ "volumeMounts": []
+ }
+ ],
+ "created": "2018-07-18T07:34:16Z",
+ "createdTS": 1531899256000,
+ "creatorId": null,
+ "deploymentConfig": {
+ "maxSurge": 1,
+ "maxUnavailable": 0,
+ "minReadySeconds": 0,
+ "progressDeadlineSeconds": 600,
+ "revisionHistoryLimit": 10,
+ "strategy": "RollingUpdate"
+ },
+ "deploymentStatus": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2018-07-18T07:34:38Z",
+ "lastTransitionTimeTS": 1531899278000,
+ "lastUpdateTime": "2018-07-18T07:34:38Z",
+ "lastUpdateTimeTS": 1531899278000,
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ },
+ {
+ "lastTransitionTime": "2018-07-18T07:34:16Z",
+ "lastTransitionTimeTS": 1531899256000,
+ "lastUpdateTime": "2018-07-18T07:34:38Z",
+ "lastUpdateTimeTS": 1531899278000,
+ "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.",
+ "reason": "NewReplicaSetAvailable",
+ "status": "True",
+ "type": "Progressing"
+ }
+ ],
+ "observedGeneration": 2,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "type": "/v3/project/schemas/deploymentStatus",
+ "unavailableReplicas": 0,
+ "updatedReplicas": 1
+ },
+ "dnsPolicy": "ClusterFirst",
+ "id": "deployment:default:nginx",
+ "labels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "name": "nginx",
+ "namespaceId": "default",
+ "projectId": "c-bcz5t:p-fdr4s",
+ "publicEndpoints": [
+ {
+ "addresses": ["10.64.3.58"],
+ "allNodes": true,
+ "ingressId": null,
+ "nodeId": null,
+ "podId": null,
+ "port": 30917,
+ "protocol": "TCP",
+ "serviceId": "default:nginx-nodeport",
+ "type": "publicEndpoint"
+ }
+ ],
+ "restartPolicy": "Always",
+ "scale": 1,
+ "schedulerName": "default-scheduler",
+ "selector": {
+ "matchLabels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "type": "/v3/project/schemas/labelSelector"
+ },
+ "state": "active",
+ "terminationGracePeriodSeconds": 30,
+ "transitioning": "no",
+ "transitioningMessage": "",
+ "type": "deployment",
+ "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd",
+ "workloadAnnotations": {
+ "deployment.kubernetes.io/revision": "1",
+ "field.cattle.io/creatorId": "user-f4tt2"
+ },
+ "workloadLabels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "scheduling": {
+ "node": {}
+ },
+ "description": "my description",
+ "volumes": []
+ }
+}
+```
+
+### 元数据、请求体和响应体日志级别
+
+如果你将 `AUDIT_LEVEL` 设置为 `3`,Rancher 会记录:
+
+- 每个 API 请求的元数据标头和请求体。
+- 每个 API 响应的元数据标头和响应体。
+
+#### 请求
+
+下面的代码示例描述了一个 API 请求,包括它的元数据标头和正文:
+
+```json
+{
+ "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af",
+ "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx",
+ "sourceIPs": ["::1"],
+ "user": {
+ "name": "user-f4tt2",
+ "group": ["system:authenticated"]
+ },
+ "verb": "PUT",
+ "stage": "RequestReceived",
+ "stageTimestamp": "2018-07-20 10:33:06 +0800",
+ "requestBody": {
+ "hostIPC": false,
+ "hostNetwork": false,
+ "hostPID": false,
+ "paused": false,
+ "annotations": {},
+ "baseType": "workload",
+ "containers": [
+ {
+ "allowPrivilegeEscalation": false,
+ "image": "nginx",
+ "imagePullPolicy": "Always",
+ "initContainer": false,
+ "name": "nginx",
+ "ports": [
+ {
+ "containerPort": 80,
+ "dnsName": "nginx-nodeport",
+ "kind": "NodePort",
+ "name": "80tcp01",
+ "protocol": "TCP",
+ "sourcePort": 0,
+ "type": "/v3/project/schemas/containerPort"
+ }
+ ],
+ "privileged": false,
+ "readOnly": false,
+ "resources": {
+ "type": "/v3/project/schemas/resourceRequirements",
+ "requests": {},
+ "limits": {}
+ },
+ "restartCount": 0,
+ "runAsNonRoot": false,
+ "stdin": true,
+ "stdinOnce": false,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "type": "/v3/project/schemas/container",
+ "environmentFrom": [],
+ "capAdd": [],
+ "capDrop": [],
+ "livenessProbe": null,
+ "volumeMounts": []
+ }
+ ],
+ "created": "2018-07-18T07:34:16Z",
+ "createdTS": 1531899256000,
+ "creatorId": null,
+ "deploymentConfig": {
+ "maxSurge": 1,
+ "maxUnavailable": 0,
+ "minReadySeconds": 0,
+ "progressDeadlineSeconds": 600,
+ "revisionHistoryLimit": 10,
+ "strategy": "RollingUpdate"
+ },
+ "deploymentStatus": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2018-07-18T07:34:38Z",
+ "lastTransitionTimeTS": 1531899278000,
+ "lastUpdateTime": "2018-07-18T07:34:38Z",
+ "lastUpdateTimeTS": 1531899278000,
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ },
+ {
+ "lastTransitionTime": "2018-07-18T07:34:16Z",
+ "lastTransitionTimeTS": 1531899256000,
+ "lastUpdateTime": "2018-07-18T07:34:38Z",
+ "lastUpdateTimeTS": 1531899278000,
+ "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.",
+ "reason": "NewReplicaSetAvailable",
+ "status": "True",
+ "type": "Progressing"
+ }
+ ],
+ "observedGeneration": 2,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "type": "/v3/project/schemas/deploymentStatus",
+ "unavailableReplicas": 0,
+ "updatedReplicas": 1
+ },
+ "dnsPolicy": "ClusterFirst",
+ "id": "deployment:default:nginx",
+ "labels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "name": "nginx",
+ "namespaceId": "default",
+ "projectId": "c-bcz5t:p-fdr4s",
+ "publicEndpoints": [
+ {
+ "addresses": ["10.64.3.58"],
+ "allNodes": true,
+ "ingressId": null,
+ "nodeId": null,
+ "podId": null,
+ "port": 30917,
+ "protocol": "TCP",
+ "serviceId": "default:nginx-nodeport",
+ "type": "publicEndpoint"
+ }
+ ],
+ "restartPolicy": "Always",
+ "scale": 1,
+ "schedulerName": "default-scheduler",
+ "selector": {
+ "matchLabels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "type": "/v3/project/schemas/labelSelector"
+ },
+ "state": "active",
+ "terminationGracePeriodSeconds": 30,
+ "transitioning": "no",
+ "transitioningMessage": "",
+ "type": "deployment",
+ "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd",
+ "workloadAnnotations": {
+ "deployment.kubernetes.io/revision": "1",
+ "field.cattle.io/creatorId": "user-f4tt2"
+ },
+ "workloadLabels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "scheduling": {
+ "node": {}
+ },
+      "description": "my description",
+ "volumes": []
+ }
+}
+```
+
+#### 响应
+
+下面的代码示例描述了一个 API 响应,包括它的元数据标头和正文:
+
+```json
+{
+ "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af",
+ "responseStatus": "200",
+ "stage": "ResponseComplete",
+ "stageTimestamp": "2018-07-20 10:33:06 +0800",
+ "responseBody": {
+ "actionLinks": {
+ "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause",
+ "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume",
+ "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback"
+ },
+ "annotations": {},
+ "baseType": "workload",
+ "containers": [
+ {
+ "allowPrivilegeEscalation": false,
+ "image": "nginx",
+ "imagePullPolicy": "Always",
+ "initContainer": false,
+ "name": "nginx",
+ "ports": [
+ {
+ "containerPort": 80,
+ "dnsName": "nginx-nodeport",
+ "kind": "NodePort",
+ "name": "80tcp01",
+ "protocol": "TCP",
+ "sourcePort": 0,
+ "type": "/v3/project/schemas/containerPort"
+ }
+ ],
+ "privileged": false,
+ "readOnly": false,
+ "resources": {
+ "type": "/v3/project/schemas/resourceRequirements"
+ },
+ "restartCount": 0,
+ "runAsNonRoot": false,
+ "stdin": true,
+ "stdinOnce": false,
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "tty": true,
+ "type": "/v3/project/schemas/container"
+ }
+ ],
+ "created": "2018-07-18T07:34:16Z",
+ "createdTS": 1531899256000,
+ "creatorId": null,
+ "deploymentConfig": {
+ "maxSurge": 1,
+ "maxUnavailable": 0,
+ "minReadySeconds": 0,
+ "progressDeadlineSeconds": 600,
+ "revisionHistoryLimit": 10,
+ "strategy": "RollingUpdate"
+ },
+ "deploymentStatus": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2018-07-18T07:34:38Z",
+ "lastTransitionTimeTS": 1531899278000,
+ "lastUpdateTime": "2018-07-18T07:34:38Z",
+ "lastUpdateTimeTS": 1531899278000,
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ },
+ {
+ "lastTransitionTime": "2018-07-18T07:34:16Z",
+ "lastTransitionTimeTS": 1531899256000,
+ "lastUpdateTime": "2018-07-18T07:34:38Z",
+ "lastUpdateTimeTS": 1531899278000,
+ "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.",
+ "reason": "NewReplicaSetAvailable",
+ "status": "True",
+ "type": "Progressing"
+ }
+ ],
+ "observedGeneration": 2,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "type": "/v3/project/schemas/deploymentStatus",
+ "unavailableReplicas": 0,
+ "updatedReplicas": 1
+ },
+ "dnsPolicy": "ClusterFirst",
+ "hostIPC": false,
+ "hostNetwork": false,
+ "hostPID": false,
+ "id": "deployment:default:nginx",
+ "labels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "links": {
+ "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx",
+ "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions",
+ "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx",
+ "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx",
+ "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml"
+ },
+ "name": "nginx",
+ "namespaceId": "default",
+ "paused": false,
+ "projectId": "c-bcz5t:p-fdr4s",
+ "publicEndpoints": [
+ {
+ "addresses": ["10.64.3.58"],
+ "allNodes": true,
+ "ingressId": null,
+ "nodeId": null,
+ "podId": null,
+ "port": 30917,
+ "protocol": "TCP",
+ "serviceId": "default:nginx-nodeport"
+ }
+ ],
+ "restartPolicy": "Always",
+ "scale": 1,
+ "schedulerName": "default-scheduler",
+ "selector": {
+ "matchLabels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ },
+ "type": "/v3/project/schemas/labelSelector"
+ },
+ "state": "active",
+ "terminationGracePeriodSeconds": 30,
+ "transitioning": "no",
+ "transitioningMessage": "",
+ "type": "deployment",
+ "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd",
+ "workloadAnnotations": {
+ "deployment.kubernetes.io/revision": "1",
+ "field.cattle.io/creatorId": "user-f4tt2"
+ },
+ "workloadLabels": {
+ "workload.user.cattle.io/workloadselector": "deployment-default-nginx"
+ }
+ }
+}
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
new file mode 100644
index 00000000000..b1b865436e6
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/continuous-delivery.md
@@ -0,0 +1,13 @@
+---
+title: 持续交付
+---
+
+Rancher 中预装的 [Fleet](../../../integrations-in-rancher/fleet/fleet.md) 无法完全禁用。但是,你可以使用 `continuous-delivery` 功能开关来禁用 GitOps 持续交付的 Fleet 功能。
+
+如需启用或禁用此功能,请参见[启用实验功能主页](enable-experimental-features.md)中的说明。
+
+| 环境变量键 | 默认值 | 描述 |
+---|---|---
+| `continuous-delivery` | `true` | 此开关禁用 Fleet 的 GitOps 持续交付功能。 |
+
+如果你在 Rancher 2.5.x 中禁用了 Fleet,然后将 Rancher 升级到 v2.6.x,Fleet 将启用。只有 Fleet 的持续交付功能可以被禁用。当 `continuous-delivery` 被禁用时,`gitjob` deployment 不再部署到 Rancher Server 的本地集群中,且 `continuous-delivery` 不会在 Rancher UI 中显示。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/enable-experimental-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md
similarity index 86%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/enable-experimental-features.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md
index 4f759f616fe..710abfc58ec 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/enable-experimental-features.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/enable-experimental-features.md
@@ -2,7 +2,11 @@
title: 启用实验功能
---
-Rancher 包含一些默认关闭的实验功能。在某些情况下,例如当你认为使用[不支持的存储类型](../how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md)的好处大于使用未经测试的功能的风险时,你可能想要启用实验功能。为了让你能够试用这些默认关闭的功能,我们引入了功能开关(feature flag)。
+
+
+
+
+Rancher 包含一些默认关闭的实验功能。在某些情况下,例如当你认为使用[不支持的存储类型](unsupported-storage-drivers.md)的好处大于使用未经测试的功能的风险时,你可能想要启用实验功能。为了让你能够试用这些默认关闭的功能,我们引入了功能开关(feature flag)。
实验功能可以通过以下三种方式启用:
@@ -19,7 +23,7 @@ Rancher 包含一些默认关闭的实验功能。在某些情况下,例如当
设置值是通过 API 设置的,而默认值是通过命令行设置。因此,如果你使用 API 或 UI 启用或禁用某个功能,命令行中设置的值将被覆盖。
-如果你安装 Rancher 后使用 Rancher API 将功能开关设置为 true,然后在使用命令升级 Rancher 时将功能开关设置为 false,在这种情况下,虽然默认值会是 false,但是该功能依然会被启用,因为它是通过 API 设置的。如果你随后使用 Rancher API 删除设置值(true)并将它设置为 NULL,则默认值(false)将生效。有关详细信息,请参阅[功能开关页面](../getting-started/installation-and-upgrade/installation-references/feature-flags.md)。
+如果你安装 Rancher 后使用 Rancher API 将功能开关设置为 true,然后在使用命令升级 Rancher 时将功能开关设置为 false,在这种情况下,虽然默认值会是 false,但是该功能依然会被启用,因为它是通过 API 设置的。如果你随后使用 Rancher API 删除设置值(true)并将它设置为 NULL,则默认值(false)将生效。有关详细信息,请参阅[功能开关页面](../../../getting-started/installation-and-upgrade/installation-references/feature-flags.md)。
## 启动 Rancher 时启用功能
@@ -53,20 +57,19 @@ helm install rancher rancher-latest/rancher \
### 离线安装的情况下渲染 Helm Chart
-如果你是在离线环境安装 Rancher 的,在使用 Helm 安装 Rancher 之前,你需要添加一个 Helm Chart 仓库并渲染一个 Helm 模板。详情请参见[离线安装文档](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md)。
+如果你是在离线环境安装 Rancher 的,在使用 Helm 安装 Rancher 之前,你需要添加一个 Helm Chart 仓库并渲染一个 Helm 模板。详情请参见[离线安装文档](../../../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md)。
以下是在渲染 Helm 模板时传入功能开关名称的命令示例。下面的示例通过传递功能开关名称(用逗号分隔)来启用两个功能。
Helm 命令如下:
```
-helm template rancher ./rancher-.tgz --output-dir . \
- --no-hooks \ # 避免生成 Helm 钩子文件
+helm install rancher ./rancher-.tgz \
--namespace cattle-system \
--set hostname= \
--set rancherImage=/rancher/rancher \
--set ingress.tls.source=secret \
- --set systemDefaultRegistry= \ # 设置在 Rancher 中使用的默认私有镜像仓库
+ --set systemDefaultRegistry= \ # 设置在 Rancher 中使用的私有镜像仓库
--set useBundledSystemChart=true # 使用打包的 Rancher System Chart
--set 'extraEnv[0].name=CATTLE_FEATURES'
--set 'extraEnv[0].value==true,=true'
@@ -83,7 +86,6 @@ docker run -d -p 80:80 -p 443:443 \
--features==true,=true
```
-
## 使用 Rancher UI 启用功能
1. 在左上角,单击 **☰ > 全局设置**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
new file mode 100644
index 00000000000..35801de23b4
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/istio-traffic-management-features.md
@@ -0,0 +1,32 @@
+---
+title: UI 管理 Istio 虚拟服务和目标规则
+---
+
+此功能可启动一个 UI,用于管理 Istio 的流量,其中包括创建、读取、更新和删除虚拟服务(Virtual Service)和目标规则(Destination Rule)。
+
+> **注意**:启用此功能并不会启用 Istio。集群管理员需要[为集群启用 Istio](../istio-setup-guide/istio-setup-guide.md) 才能使用该功能。
+
+如需启用或禁用此功能,请参见[启用实验功能主页](enable-experimental-features.md)中的说明。
+
+| 环境变量键 | 默认值 | 状态 | 可用于 |
+---|---|---|---
+| `istio-virtual-service-ui` | `false` | 实验功能 | v2.3.0 |
+| `istio-virtual-service-ui` | `true` | GA | v2.3.2 |
+
+## 功能介绍
+
+Istio 流量管理功能的主要优势是允许动态请求路由,这对于金丝雀发布、蓝/绿发布或 A/B 测试都非常有用。
+
+启用此功能后,一个页面会打开,让你通过 Rancher UI 配置 Istio 的某些流量管理功能。如果不使用此功能,你可以通过 `kubectl` 来使用 Istio 管理流量。
+
+此功能会启用两个选项卡,一个用于**虚拟服务**,另一个用于**目标规则**。
+
+- **虚拟服务**:拦截并将流量重定向到你的 Kubernetes Service 上。这样,你可以将部分请求流量定向到不同的服务上。你可以使用这些服务来定义一组路由规则,用于主机寻址。详情请参见 [Istio 官方文档](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/)。
+- **目标规则**:作为唯一可信来源,表明哪些服务版本可用于接收虚拟服务的流量。你可以使用这些资源来定义策略,这些策略适用于路由发生后用于服务的流量。详情请参见 [Istio 官方文档](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule)。
+
+如需查看选项卡:
+
+1. 点击 **☰ > 集群管理**。
+1. 转到安装了 Istio 的集群,然后单击 **Explore**。
+1. 在左侧导航栏中,单击 **Istio**。
+1. 你将看到 **Kiali** 和 **Jaeger** 的选项卡。在左侧导航栏中,你可查看和配置**虚拟服务**和**目标规则**。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
new file mode 100644
index 00000000000..16ebb15e25a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/rancher-on-arm64.md
@@ -0,0 +1,44 @@
+---
+title: "在 ARM64 上运行 Rancher(实验性)"
+---
+
+:::caution
+
+在使用 ARM64 架构的节点上运行 Rancher 目前还处在实验阶段,Rancher 尚未正式支持该功能。因此,我们不建议你在生产环境中使用 ARM64 架构的节点。
+
+:::
+
+如果你的节点使用 ARM64 架构,你可以使用以下选项:
+
+- 在 ARM64 架构的节点上运行 Rancher
+ - 此选项仅适用于 Docker 安装。请知悉,以下安装命令取代了 [Docker 安装链接](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md)中的示例:
+
+ ```
+ # 在最后一行 `rancher/rancher:vX.Y.Z` 中,请务必将 "X.Y.Z" 替换为包含 ARM64 版本的发布版本。例如,如果你的匹配版本是 v2.5.8,请在此行填写 `rancher/rancher:v2.5.8`。
+ docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ --privileged \
+ rancher/rancher:vX.Y.Z
+ ```
+
+:::note
+
+要检查你的发行版本是否与 ARM64 架构兼容,你可以使用以下两种方式找到对应版本的发行说明:
+
+- 访问 [Rancher 发行版本](https://github.com/rancher/rancher/releases)自行查询。
+- 根据标签和版本号直接找到你的版本。例如,你使用的版本为 2.5.8,你可以访问 [Rancher 发行版本 - 2.5.8](https://github.com/rancher/rancher/releases/tag/v2.5.8)。
+
+:::
+
+- 创建自定义集群并添加使用 ARM64 架构的节点
+ - Kubernetes 集群必须为 1.12 或更高版本
+ - CNI 网络插件必须是 [Flannel](../../../faq/container-network-interface-providers.md#flannel)
+- 导入包含使用 ARM64 架构的节点的集群
+ - Kubernetes 集群必须为 1.12 或更高版本
+
+如需了解如何配置集群选项,请参见[集群选项](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)。
+
+以下是未经测试的功能:
+
+- Monitoring、告警、Notifiers、流水线和 Logging
+- 通过应用商店发布应用
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
new file mode 100644
index 00000000000..19b9b27b2e0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/enable-experimental-features/unsupported-storage-drivers.md
@@ -0,0 +1,39 @@
+---
+title: 使用非默认支持的存储驱动
+---
+
+此功能允许你使用不是默认启用的存储提供商和卷插件。
+
+如需启用或禁用此功能,请参见[启用实验功能主页](enable-experimental-features.md)中的说明。
+
+| 环境变量键 | 默认值 | 描述 |
+---|---|---
+| `unsupported-storage-drivers` | `false` | 启用非默认启用的存储提供商和卷插件。 |
+
+### 默认启用的持久卷插件
+下表描述了默认启用的存储类型对应的持久卷插件。启用此功能开关时,不在此列表中的任何持久卷插件均被视为实验功能,且不受支持:
+
+| 名称 | 插件 |
+--------|----------
+| Amazon EBS Disk | `aws-ebs` |
+| AzureFile | `azure-file` |
+| AzureDisk | `azure-disk` |
+| Google Persistent Disk | `gce-pd` |
+| Longhorn | `flex-volume-longhorn` |
+| VMware vSphere Volume | `vsphere-volume` |
+| 本地 | `local` |
+| 网络文件系统 | `nfs` |
+| hostPath | `host-path` |
+
+### 默认启用的 StorageClass
+下表描述了默认启用的 StorageClass 对应的持久卷插件。启用此功能开关时,不在此列表中的任何持久卷插件均被视为实验功能,且不受支持:
+
+| 名称 | 插件 |
+--------|--------
+| Amazon EBS Disk | `aws-ebs` |
+| AzureFile | `azure-file` |
+| AzureDisk | `azure-disk` |
+| Google Persistent Disk | `gce-pd` |
+| Longhorn | `flex-volume-longhorn` |
+| VMware vSphere Volume | `vsphere-volume` |
+| 本地 | `local` |
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
new file mode 100644
index 00000000000..39c9bdee036
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md
@@ -0,0 +1,29 @@
+---
+title: 1. 在集群中启用 Istio
+---
+
+:::note 先决条件:
+
+- 只有分配了 `cluster-admin` [Kubernetes 默认角色](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles)的用户可以在 Kubernetes 集群中配置和安装 Istio。
+- 如果你有 pod 安全策略,则需要安装启用了 CNI 的 Istio。有关详细信息,请参阅[本节](../../../integrations-in-rancher/istio/configuration-options/pod-security-policies.md)。
+- 要在 RKE2 集群上安装 Istio,则需要执行额外的步骤。有关详细信息,请参阅[本节](../../../integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md)。
+- 要在启用了项目网络隔离的集群中安装 Istio,则需要执行额外的步骤。有关详细信息,请参阅[本节](../../../integrations-in-rancher/istio/configuration-options/project-network-isolation.md)。
+
+:::
+
+1. 点击 **☰ > 集群管理**。
+1. 转到要启用 Istio 的位置,然后单击 **Explore**。
+1. 单击 **Apps**。
+1. 单击 **Chart**。
+1. 单击 **Istio**。
+1. 如果你还没有安装 Monitoring 应用,系统会提示你安装 rancher-monitoring。你也可以选择在 Rancher-monitoring 安装上设置选择器或抓取配置选项。
+1. 可选:为 Istio 组件配置成员访问和[资源限制](../../../integrations-in-rancher/istio/cpu-and-memory-allocations.md)。确保你的 Worker 节点上有足够的资源来启用 Istio。
+1. 可选:如果需要,对 values.yaml 进行额外的配置更改。
+1. 可选:通过[覆盖文件](../../../pages-for-subheaders/configuration-options.md#覆盖文件)来添加其他资源或配置。
+1. 单击**安装**。
+
+**结果**:已在集群级别安装 Istio。
+
+## 其他配置选项
+
+有关配置 Istio 的更多信息,请参阅[配置参考](../../../pages-for-subheaders/configuration-options.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
new file mode 100644
index 00000000000..3651b9d77c6
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md
@@ -0,0 +1,53 @@
+---
+title: 2. 在命名空间中启用 Istio
+---
+
+你需要在需要由 Istio 跟踪或控制的每个命名空间中手动启用 Istio。在命名空间中启用 Istio 时,Envoy sidecar 代理将自动注入到部署在命名空间中的所有新工作负载中。
+
+此命名空间设置只会影响命名空间中的新工作负载。之前的工作负载需要重新部署才能使用 sidecar 自动注入。
+
+:::note 先决条件:
+
+要在命名空间中启用 Istio,集群必须安装 Istio。
+
+:::
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 单击**集群 > 项目/命名空间**。
+1. 转到要启用 Istio 的命名空间,然后单击**⋮ > 启用 Istio 自动注入**。或者,你也可以单击命名空间,然后在命名空间详情页面上,单击**⋮ > 启用 Istio 自动注入**。
+
+**结果**:命名空间带有了 `istio-injection=enabled` 标签。默认情况下,部署在此命名空间中的所有新工作负载都将注入 Istio sidecar。
+
+### 验证是否启用了自动 Istio Sidecar 注入
+
+要验证 Istio 是否已启用,请在命名空间中部署一个 hello-world 工作负载。转到工作负载并单击 pod 名称。在**容器**中,你应该能看到 `istio-proxy` 容器。
+
+### 排除工作负载的 Istio Sidecar 注入
+
+要排除 Istio sidecar 被注入某工作负载,请在工作负载上使用以下注释:
+
+```
+sidecar.istio.io/inject: "false"
+```
+
+要将注释添加到工作负载:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 点击**工作负载**。
+1. 转到不需要 sidecar 的工作负载并以 yaml 编辑。
+1. 将键值 `sidecar.istio.io/inject: false` 添加为工作负载的注释。
+1. 单击**保存**。
+
+**结果**:Istio sidecar 不会被注入到工作负载中。
+
+:::note
+
+如果你遇到部署的 job 未完成的问题,则需要使用提供的步骤将此注释添加到 pod 中。由于 Istio Sidecars 会一直运行,因此即使任务完成了,也不能认为 Job 已完成。
+
+:::
+
+
+### 后续步骤
+[使用 Istio Sidecar 添加部署](use-istio-sidecar.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md
new file mode 100644
index 00000000000..a6d77763919
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md
@@ -0,0 +1,26 @@
+---
+title: 6. 生成和查看流量
+---
+
+本文介绍如何查看 Istio 管理的流量。
+
+## Kiali 流量图
+
+Istio 概览页面提供了 Kiali 仪表板的链接。在 Kiali 仪表板中,你可以查看每个命名空间的图。Kiali 图提供了一种强大的方式来可视化 Istio 服务网格的拓扑。它显示了服务之间相互通信的情况。
+
+:::note 先决条件:
+
+要显示流量图,请确保你在集群中安装了 Prometheus。Rancher-istio 安装了默认配置的 Kiali 来与 rancher-monitoring Chart 一起工作。你可以使用 rancher-monitoring 或安装自己的监控解决方案。你也可以通过设置[选择器 & 抓取配置](../../../integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md)选项来更改数据抓取的配置(可选)。
+
+:::
+
+要查看流量图:
+
+1. 在安装了 Istio 的集群中,点击左侧导航栏中的 **Istio**。
+1. 单击 **Kiali** 链接。
+1. 单击侧导航中的**图**。
+1. 在**命名空间**下拉列表中,更改命名空间以查看每个命名空间的流量。
+
+如果你多次刷新 BookInfo 应用的 URL,你将能够在 Kiali 图上看到绿色箭头,显示 `reviews` 服务 `v1` 和 `v3` 的流量。图右侧的控制面板可用于配置详细信息,包括应在图上显示多少分钟的最新流量。
+
+对于其他工具和可视化,你可以从**监控** > **概览**页面转到 Grafana 和 Prometheus 仪表板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/istio-setup-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/istio-setup-guide.md
new file mode 100644
index 00000000000..e6b3e08e753
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/istio-setup-guide.md
@@ -0,0 +1,34 @@
+---
+title: 设置指南
+---
+
+
+
+
+
+本文介绍如何启用 Istio 并在你的项目中使用它。
+
+如果你使用 Istio 进行流量管理,则需要允许外部流量进入集群。在这种情况下,你将需要执行以下所有步骤。
+
+## 先决条件
+
+本指南假设你已经[安装 Rancher](../../../getting-started/installation-and-upgrade/installation-and-upgrade.md),且已经[配置了一个单独的 Kubernetes 集群](../../new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md)并要在该集群上安装 Istio。
+
+集群中的节点必须满足 [CPU 和内存要求](../../../integrations-in-rancher/istio/cpu-and-memory-allocations.md)。
+
+Istio 控制的工作负载和服务必须满足 [Istio 要求](https://istio.io/docs/setup/additional-setup/requirements/)。
+
+## 安装
+
+:::tip 快速设置提示:
+
+如果你不需要外部流量到达 Istio,而只想设置 Istio 以监控和跟踪集群内的流量,请跳过[设置 Istio Gateway](set-up-istio-gateway.md)和[设置 Istio 的流量管理组件](set-up-traffic-management.md)步骤。
+
+:::
+
+1. [在集群中启用 Istio。](enable-istio-in-cluster.md)
+2. [在命名空间中启用 Istio。](enable-istio-in-namespace.md)
+3. [使用 Istio Sidecar 添加部署和服务。](use-istio-sidecar.md)
+4. [设置 Istio Gateway。](set-up-istio-gateway.md)
+5. [设置 Istio 的流量管理组件。](set-up-traffic-management.md)
+6. [生成和查看流量。](generate-and-view-traffic.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md
new file mode 100644
index 00000000000..42f89baa8ef
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md
@@ -0,0 +1,147 @@
+---
+title: 4. 设置 Istio Gateway
+---
+
+每个集群的网关可以有自己的端口或负载均衡器,这与服务网格无关。默认情况下,每个 Rancher 配置的集群都有一个 NGINX Ingress Controller 来允许流量进入集群。
+
+无论是否安装了 Istio,你都可以使用 NGINX Ingress Controller。如果这是你集群的唯一网关,Istio 将能够将流量从集群内部的服务路由到集群内部的另一个服务,但 Istio 将无法接收来自集群外部的流量。
+
+要让 Istio 接收外部流量,你需要启用 Istio 的网关,作为外部流量的南北代理。启用 Istio Gateway 后,你的集群将有两个 Ingress。
+
+你还需要为你的服务设置 Kubernetes 网关。此 Kubernetes 资源指向 Istio 对集群 Ingress Gateway 的实现。
+
+你可以使用负载均衡器将流量路由到服务网格中,或使用 Istio 的 NodePort 网关。本文介绍如何设置 NodePort 网关。
+
+有关 Istio Gateway 的更多信息,请参阅 [Istio 文档](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/)。
+
+
+
+## 启用 Istio Gateway
+
+Ingress Gateway 是一个 Kubernetes 服务,将部署在你的集群中。Istio Gateway 支持更多自定义设置,更加灵活。
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,单击 **Istio > 网关**。
+1. 单击**使用 YAML 文件创建**。
+1. 粘贴你的 Istio Gateway yaml,或选择**从文件读取**。
+1. 单击**创建**。
+
+**结果**:已部署网关,将使用应用的规则来路由流量。
+
+## Istio Gateway 示例
+
+在演示工作负载示例时,我们在服务中添加 BookInfo 应用部署。接下来,我们添加一个 Istio Gateway,以便从集群外部访问该应用。
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,单击 **Istio > 网关**。
+1. 单击**使用 YAML 文件创建**。
+1. 复制并粘贴下面的 Gateway YAML。
+1. 单击**创建**。
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+ name: bookinfo-gateway
+spec:
+ selector:
+ istio: ingressgateway # use istio default controller
+ servers:
+ - port:
+ number: 80
+ name: http
+ protocol: HTTP
+ hosts:
+ - "*"
+---
+```
+
+然后,部署为 Gateway 提供流量路由的 VirtualService:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,单击 **Istio > VirtualServices**。
+1. 复制并粘贴下面的 VirtualService YAML。
+1. 单击**创建**。
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: bookinfo
+spec:
+ hosts:
+ - "*"
+ gateways:
+ - bookinfo-gateway
+ http:
+ - match:
+ - uri:
+ exact: /productpage
+ - uri:
+ prefix: /static
+ - uri:
+ exact: /login
+ - uri:
+ exact: /logout
+ - uri:
+ prefix: /api/v1/products
+ route:
+ - destination:
+ host: productpage
+ port:
+ number: 9080
+```
+
+**结果**:你已配置网关资源,Istio 现在可以接收集群外部的流量。
+
+运行以下命令来确认资源存在:
+```
+kubectl get gateway -A
+```
+
+结果应与以下内容类似:
+```
+NAME AGE
+bookinfo-gateway 64m
+```
+
+### 在 Web 浏览器访问 ProductPage 服务
+
+要测试 BookInfo 应用是否已正确部署,你可以使用 Istio 控制器 IP 和端口以及在 Kubernetes 网关资源中指定的请求名称,在 Web 浏览器中查看该应用:
+
+`http://:/productpage`
+
+要获取 Ingress Gateway URL 和端口:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,单击**工作负载**。
+1. 向下滚动到 `istio-system` 命名空间。
+1. 在 `istio-system`中,有一个名为 `istio-ingressgateway` 的工作负载。在此工作负载的名称下,你应该会看到如 `80/tcp` 的链接。
+1. 单击其中一个链接。然后,你的 Web 浏览器中会显示 Ingress Gateway 的 URL。将 `/productpage` 追加到该 URL 的末尾。
+
+**结果**:你将会在 Web 浏览器中看到 BookInfo 应用。
+
+如需检查 Istio 控制器 URL 和端口的帮助,请尝试运行 [Istio 文档](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports)中的命令。
+
+## 故障排除
+
+[官方 Istio 文档](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting)建议使用 `kubectl` 命令来检查外部请求的正确 ingress 主机和 ingress 端口。
+
+### 确认 Kubernetes 网关与 Istio 的 Ingress Controller 匹配
+
+你可以尝试执行本节中的步骤以确保 Kubernetes 网关配置正确。
+
+在网关资源中,选择器通过标签来引用 Istio 的默认 Ingress Controller,其中标签的键是 `istio`,值是 `ingressgateway`。要确保标签适用于网关,请执行以下操作:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,单击**工作负载**。
+1. 向下滚动到 `istio-system` 命名空间。
+1. 在 `istio-system`中,有一个名为 `istio-ingressgateway` 的工作负载。单击此工作负载的名称并转到**标签和注释**部分。你应该看到它具有 `istio` 键和 `ingressgateway` 值。这确认了 Gateway 资源中的选择器与 Istio 的默认 ingress controller 匹配。
+
+### 后续步骤
+[设置 Istio 的流量管理组件](set-up-traffic-management.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
new file mode 100644
index 00000000000..9782bd28938
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md
@@ -0,0 +1,76 @@
+---
+title: 5. 设置 Istio 的流量管理组件
+---
+
+Istio 中流量管理的一个核心优势是允许动态请求路由。动态请求路由通常应用于金丝雀部署和蓝/绿部署等。Istio 流量管理中的两个关键资源是*虚拟服务*和*目标规则*。
+
+- [虚拟服务](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/):拦截并将流量重定向到你的 Kubernetes Service 上。这样,你可以将部分请求流量分配到不同的服务上。你可以使用这些服务来定义一组路由规则,用于主机寻址。
+- [目标规则](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/):作为唯一可信来源,表明哪些服务版本可用于接收虚拟服务的流量。你可以使用这些资源来定义策略,这些策略适用于路由发生后用于服务的流量。
+
+本文介绍如何在示例 BookInfo 应用中添加与 `reviews` 微服务对应的虚拟服务示例。此服务的目的是在 `reviews` 服务的两个版本之间划分流量。
+
+在这个示例中,我们将流量带到 `reviews` 服务中并拦截流量,这样,50% 的流量会流向服务的 `v1`,另外 50% 的流量会流向 `v3`。
+
+部署这个虚拟服务后,我们将生成流量,并通过 Kiali 可视化看到流量平均路由到服务的两个版本中。
+
+要为 `reviews` 服务部署虚拟服务和目标规则:
+1. 点击 **☰ > 集群管理**。
+1. 转到安装了 Istio 的集群,然后单击 **Explore**。
+1. 在安装了 Istio 的集群中,点击左侧导航栏中的 **Istio > DestinationRules**。
+1. 单击**创建**。
+1. 复制并粘贴下面的 DestinationRule YAML。
+1. 单击**创建**。
+1. 单击**以 YAML 文件编辑**并使用此配置:
+
+ ```yaml
+ apiVersion: networking.istio.io/v1alpha3
+ kind: DestinationRule
+ metadata:
+ name: reviews
+ spec:
+ host: reviews
+ subsets:
+ - name: v1
+ labels:
+ version: v1
+ - name: v2
+ labels:
+ version: v2
+ - name: v3
+ labels:
+ version: v3
+ ```
+1. 单击**创建**。
+
+然后,部署提供利用 DestinationRule 的流量路由的 VirtualService:
+
+1. 单击侧导航栏中的 **VirtualService**。
+1. 单击**使用 YAML 文件创建**。
+1. 复制并粘贴下面的 VirtualService YAML。
+1. 单击**创建**。
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+ name: reviews
+spec:
+ hosts:
+ - reviews
+ http:
+ - route:
+ - destination:
+ host: reviews
+ subset: v1
+ weight: 50
+ - destination:
+ host: reviews
+ subset: v3
+ weight: 50
+---
+```
+
+**结果**:生成流到该服务的流量时(例如,刷新 Ingress Gateway URL),你可以在 Kiali 流量图中看到流到 `reviews` 服务的流量被平均分配到了 `v1` 和 `v3`。
+
+### 后续步骤
+[生成和查看流量](generate-and-view-traffic.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
new file mode 100644
index 00000000000..2290de911ad
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md
@@ -0,0 +1,360 @@
+---
+title: 3. 使用 Istio Sidecar 添加部署和服务
+---
+
+:::note 先决条件:
+
+要为工作负载启用 Istio,你必须先在集群和命名空间中安装 Istio 应用。
+
+:::
+
+在命名空间中启用 Istio 只会为新工作负载启用自动 sidecar 注入。要为现有工作负载启用 Envoy sidecar,你需要手动为每个工作负载启用它。
+
+要在命名空间中的现有工作负载上注入 Istio sidecar:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要可视化的集群,然后单击 **Explore**。
+1. 点击**工作负载**。
+1. 转到要注入 Istio sidecar 的工作负载,然后单击 **⋮ > 重新部署**。重新部署工作负载后,该工作负载会自动注入 Envoy sidecar。
+
+等待几分钟,然后工作负载将升级并具有 Istio sidecar。单击它并转到**容器**。你应该能看到该工作负载旁边的 `istio-proxy`。这意味着为工作负载启用了 Istio sidecar。Istio 正在为 Sidecar Envoy 做所有的接线工作。如果你现在在 yaml 中启用它们,Istio 可以自动执行所有功能。
+
+### 添加部署和服务
+
+以下是在命名空间中添加新 **Deployment** 的几种方法:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 点击**工作负载**。
+1. 单击**创建**。
+1. 点击 **Deployment**。
+1. 填写表单,或**以 YAML 文件编辑**。
+1. 单击**创建**。
+
+要将 **Service** 添加到你的命名空间:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 点击**服务发现 > 服务**。
+1. 单击**创建**。
+1. 选择所需的服务类型。
+1. 填写表单,或**以 YAML 文件编辑**。
+1. 点击**创建**。
+
+你还可以使用 kubectl **shell** 来创建 deployment 和 service:
+
+1. 如果你的文件存储在本地集群中,运行 `kubectl create -f <文件名>.yaml`。
+1. 或运行 `cat<< EOF | kubectl apply -f -`,将文件内容粘贴到终端,然后运行 `EOF` 来完成命令。
+
+### 部署和服务示例
+
+接下来,我们为 Istio 文档中的 BookInfo 应用的示例部署和服务添加 Kubernetes 资源:
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在顶部导航栏中,打开 kubectl shell。
+1. 运行 `cat<< EOF | kubectl apply -f -`。
+1. 将以下资源复制到 shell 中。
+1. 运行 `EOF`。
+
+这将在 Istio 的示例 BookInfo 应用中设置以下示例资源:
+
+Details 服务和部署:
+
+- 一个 `details` Service。
+- 一个 `bookinfo-details` 的 ServiceAccount。
+- 一个 `details-v1` Deployment。
+
+Ratings 服务和部署:
+
+- 一个 `ratings` Service。
+- 一个 `bookinfo-ratings` 的 ServiceAccount。
+- 一个 `ratings-v1` Deployment。
+
+Reviews 服务和部署(三个版本):
+
+- 一个 `reviews` Service。
+- 一个 `bookinfo-reviews` 的 ServiceAccount。
+- 一个 `reviews-v1` Deployment。
+- 一个 `reviews-v2` Deployment。
+- 一个 `reviews-v3` Deployment。
+
+Productpage 服务和部署:
+
+这是应用的主页,可以在网络浏览器中查看。将从该页面调用其他服务。
+
+- 一个 `productpage` service。
+- 一个 `bookinfo-productpage` 的 ServiceAccount。
+- 一个 `productpage-v1` Deployment。
+
+### 资源 YAML
+
+```yaml
+# Copyright 2017 Istio Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##################################################################################################
+# Details service
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+ name: details
+ labels:
+ app: details
+ service: details
+spec:
+ ports:
+ - port: 9080
+ name: http
+ selector:
+ app: details
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: bookinfo-details
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: details-v1
+ labels:
+ app: details
+ version: v1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: details
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: details
+ version: v1
+ spec:
+ serviceAccountName: bookinfo-details
+ containers:
+ - name: details
+ image: docker.io/istio/examples-bookinfo-details-v1:1.15.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 9080
+---
+##################################################################################################
+# Ratings service
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+ name: ratings
+ labels:
+ app: ratings
+ service: ratings
+spec:
+ ports:
+ - port: 9080
+ name: http
+ selector:
+ app: ratings
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: bookinfo-ratings
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ratings-v1
+ labels:
+ app: ratings
+ version: v1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ratings
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: ratings
+ version: v1
+ spec:
+ serviceAccountName: bookinfo-ratings
+ containers:
+ - name: ratings
+ image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 9080
+---
+##################################################################################################
+# Reviews service
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+ name: reviews
+ labels:
+ app: reviews
+ service: reviews
+spec:
+ ports:
+ - port: 9080
+ name: http
+ selector:
+ app: reviews
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: bookinfo-reviews
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: reviews-v1
+ labels:
+ app: reviews
+ version: v1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: reviews
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: reviews
+ version: v1
+ spec:
+ serviceAccountName: bookinfo-reviews
+ containers:
+ - name: reviews
+ image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 9080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: reviews-v2
+ labels:
+ app: reviews
+ version: v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: reviews
+ version: v2
+ template:
+ metadata:
+ labels:
+ app: reviews
+ version: v2
+ spec:
+ serviceAccountName: bookinfo-reviews
+ containers:
+ - name: reviews
+ image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 9080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: reviews-v3
+ labels:
+ app: reviews
+ version: v3
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: reviews
+ version: v3
+ template:
+ metadata:
+ labels:
+ app: reviews
+ version: v3
+ spec:
+ serviceAccountName: bookinfo-reviews
+ containers:
+ - name: reviews
+ image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 9080
+---
+##################################################################################################
+# Productpage services
+##################################################################################################
+apiVersion: v1
+kind: Service
+metadata:
+ name: productpage
+ labels:
+ app: productpage
+ service: productpage
+spec:
+ ports:
+ - port: 9080
+ name: http
+ selector:
+ app: productpage
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: bookinfo-productpage
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: productpage-v1
+ labels:
+ app: productpage
+ version: v1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: productpage
+ version: v1
+ template:
+ metadata:
+ labels:
+ app: productpage
+ version: v1
+ spec:
+ serviceAccountName: bookinfo-productpage
+ containers:
+ - name: productpage
+ image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 9080
+---
+```
+
+### 后续步骤
+[设置 Istio Gateway](set-up-istio-gateway.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
new file mode 100644
index 00000000000..91578148d71
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
@@ -0,0 +1,39 @@
+---
+title: Pod 安全策略
+---
+
+:::note
+
+本文介绍的集群选项仅适用于 [Rancher 已在其中启动 Kubernetes 的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+
+:::
+
+你可以在创建项目的时候设置 Pod 安全策略(PSP)。如果在创建项目期间没有为项目分配 PSP,你也随时可以将 PSP 分配给现有项目。
+
+### 先决条件
+
+- 在 Rancher 中创建 Pod 安全策略。在将默认 PSP 分配给现有项目之前,你必须有一个可分配的 PSP。有关说明,请参阅[创建 Pod 安全策略](../../new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md)。
+- 将默认 Pod 安全策略分配给项目所属的集群。如果 PSP 还没有应用到集群,你无法将 PSP 分配给项目。有关详细信息,请参阅[将 pod 安全策略添加到集群](../../new-user-guides/manage-clusters/add-a-pod-security-policy.md)。
+
+### 应用 Pod 安全策略
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到需要移动命名空间的集群,然后单击 **Explore**。
+1. 单击**集群 > 项目/命名空间**。
+1. 找到要添加 PSP 的项目。在该项目中选择 **⋮ > 编辑配置**。
+1. 从 **Pod 安全策略**下拉列表中,选择要应用于项目的 PSP。
+ 将 PSP 分配给项目将:
+
+- 覆盖集群的默认 PSP。
+- 将 PSP 应用于项目。
+- 将 PSP 应用到后续添加到项目中的命名空间。
+
+1. 单击**保存**。
+
+**结果**:已将 PSP 应用到项目以及项目内的命名空间。
+
+:::note
+
+对于在分配 PSP 之前就已经在集群或项目中运行的工作负载,Rancher 不会检查它们是否符合 PSP。你需要克隆或升级工作负载以查看它们是否通过 PSP。
+
+:::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md
new file mode 100644
index 00000000000..26e87e8f31a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md
@@ -0,0 +1,67 @@
+---
+title: Rancher 项目中资源配额的工作原理
+---
+
+Rancher 中的资源配额包含与 [Kubernetes 原生版本](https://kubernetes.io/docs/concepts/policy/resource-quotas/)相同的功能。Rancher 还扩展了资源配额的功能,让你将资源配额应用于项目。
+
+在标准 Kubernetes deployment 中,资源配额会应用于各个命名空间。但是,你不能通过单次操作将配额应用到多个命名空间,而必须多次应用资源配额。
+
+在下图中,Kubernetes 管理员试图在没有 Rancher 的情况下强制执行资源配额。管理员想要使用一个资源配额来为集群中的每个命名空间配置统一的 CPU 和内存限制 (`Namespace 1-4`)。但是,在 Kubernetes 的基础版本中,每个命名空间都需要单独设置资源配额。因此,管理员必须创建四个配置相同规格的不同资源配额(`Resource Quota 1-4`)并单独应用这些配额。
+
+Kubernetes 基础版本:每个命名空间都需要独立设置资源配额
+
+
+
+和原生 Kubernetes 相比,Rancher 的资源配额有所不同。在 Rancher 中,你可以把资源配额应用到项目层级,进而让项目的资源配额沿用到项目内的每一个命名空间,然后 Kubernetes 会使用原生的资源配额来强制执行你设置的限制。如果要更改特定命名空间的配额,你也可以覆盖设置。
+
+项目配额包括你在创建或编辑集群时设置的两个限制:
+
+
+- **项目限制**:
+
+ 配置了项目中所有命名空间共享的每个指定资源的总限制。
+
+- **命名空间默认限制**:
+
+ 配置了每个命名空间对每个指定资源的默认配额。
+ 如果项目中的命名空间配置没有被覆盖,那么此限制会自动绑定到命名空间并强制执行。
+
+
+在下图中,Rancher 管理员想使用资源配额来为项目中的每个命名空间(`命名空间 1-4`)设置相同的 CPU 和内存限制。在 Rancher 中,管理员可以为项目设置资源配额(`项目资源配额`),而不需要为命名空间单独进行设置。此配额包括整个项目(`项目限制`)和单个命名空间(`命名空间默认限制`)的资源限制。然后,Rancher 会将`命名空间默认限制`的配额沿用到每个命名空间(`命名空间资源配额`)。
+
+Rancher:资源配额沿用到每个命名空间
+
+
+
+以下介绍在 Rancher UI **_中_** 创建的命名空间的更细微的功能。如果你删除了项目级别的资源配额,无论命名空间层级是否有自定义的资源配额,项目内的所有命名空间也会移除这个资源配额。在项目层级修改已有的命名空间默认资源配额,不会影响命名空间内的资源配额,修改后的项目层级资源配额只会对以后新建的命名空间生效。要修改多个现有命名空间的默认限制,你可以在项目层级删除该限制,然后再使用新的默认值重新创建配额。这种方式会将新的默认值应用于项目中的所有现有命名空间。
+
+在项目中创建命名空间之前,Rancher 会使用默认限制和覆盖限制来对比项目内的可用资源和请求资源。
+如果请求的资源超过了项目中这些资源的剩余容量,Rancher 将为命名空间分配该资源的剩余容量。
+
+但是,在 Rancher 的 UI **_外_** 创建的命名空间的处理方法则不一样。对于通过 `kubectl` 创建的命名空间,如果请求的资源量多于项目内的余量,Rancher 会分配一个数值为 **0** 的资源配额。
+
+要使用 `kubectl` 在现有项目中创建命名空间,请使用 `field.cattle.io/projectId` 注释。要覆盖默认的请求配额限制,请使用 `field.cattle.io/resourceQuota` 注释。
+
+请注意,Rancher 只会覆盖项目配额上定义的资源限制。
+
+```
+apiVersion: v1
+kind: Namespace
+metadata:
+ annotations:
+ field.cattle.io/projectId: [your-cluster-ID]:[your-project-ID]
+ field.cattle.io/resourceQuota: '{"limit":{"limitsCpu":"100m", "configMaps": "50"}}'
+ name: my-ns
+```
+在此示例中,如果项目配额在其资源列表中不包含 configMaps,那么 Rancher 将忽略此覆盖中的 `configMaps`。
+
+对于项目中未定义的资源,建议你在命名空间中创建专用的 `ResourceQuota` 对象来配置其它自定义限制。
+资源配额是原生 Kubernetes 对象,如果命名空间属于具有配额的项目,Rancher 将忽略用户定义的配额,从而给予用户更多的控制权。
+
+下表对比了 Rancher 和 Kubernetes 资源配额的主要区别:
+
+| Rancher 资源配额 | Kubernetes 资源配额 |
+| ---------------------------------------------------------- | -------------------------------------------------------- |
+| 应用于项目和命名空间。 | 仅应用于命名空间。 |
+| 为项目中的所有命名空间创建资源池。 | 将静态资源限制应用到单独的命名空间。 |
+| 通过沿用的模式,将资源配额应用于各个命名空间。 | 仅应用于指定的命名空间。 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-project-resource-quotas.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
similarity index 87%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-project-resource-quotas.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
index 89c8a760f5e..ce57b2db349 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/manage-project-resource-quotas.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/manage-project-resource-quotas.md
@@ -2,13 +2,17 @@
title: 项目资源配额
---
+
+
+
+
如果多个团队共享一个集群,某个团队可能会使用过多的可用资源,例如 CPU、内存、存储、服务、Kubernetes 对象(如 Pod 或 Secret)等。你可以应用 _资源配额_ 来防止过度消耗资源。资源配额是 Rancher 用来限制项目或命名空间可用资源的功能。
本文介绍如何在现有项目中创建资源配额。
-你也可以在创建新项目时设置资源配额。有关详细信息,请参阅[创建新项目](../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md#创建项目)。
+你也可以在创建新项目时设置资源配额。有关详细信息,请参阅[创建新项目](../../../new-user-guides/manage-clusters/projects-and-namespaces.md#创建项目)。
-Rancher 中的资源配额包含与 [Kubernetes 原生版本](https://kubernetes.io/docs/concepts/policy/resource-quotas/)相同的功能。Rancher 还扩展了资源配额的功能,从而让你将资源配额应用于项目。有关资源配额如何与 Rancher 中的项目一起使用的详细信息,请参阅[此页面](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md)。
+Rancher 中的资源配额包含与 [Kubernetes 原生版本](https://kubernetes.io/docs/concepts/policy/resource-quotas/)相同的功能。Rancher 还扩展了资源配额的功能,从而让你将资源配额应用于项目。有关资源配额如何与 Rancher 中的项目一起使用的详细信息,请参阅[此页面](about-project-resource-quotas.md)。
### 将资源配额应用于现有项目
@@ -30,7 +34,7 @@ Rancher 中的资源配额包含与 [Kubernetes 原生版本](https://kubernetes
1. 展开**资源限额**并单击**添加资源**。你也可以编辑现有配额。
-1. 选择资源类型。有关类型的更多信息,请参阅[配额类型参考](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md)。
+1. 选择资源类型。有关类型的更多信息,请参阅[配额类型参考](resource-quota-types.md)。
1. 输入**项目限制**和**命名空间默认限制**的值。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
new file mode 100644
index 00000000000..f29c3004b85
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md
@@ -0,0 +1,34 @@
+---
+title: 覆盖命名空间的默认限制
+---
+
+**命名空间默认限制**会在创建时从项目沿用到每个命名空间。但在某些情况下,你可能需要增加或减少特定命名空间的配额。在这种情况下,你可以通过编辑命名空间来覆盖默认限制。
+
+在下图中,Rancher 管理员的项目有一个已生效的资源配额。但是,管理员想要覆盖 `Namespace 3` 的命名空间限制,以便让该命名空间使用更多资源。因此,管理员[提高了 `Namespace 3` 的命名空间限制](../../../new-user-guides/manage-clusters/projects-and-namespaces.md),以便命名空间可以访问更多资源。
+
+命名空间默认限制覆盖
+
+
+
+有关详细信息,请参阅[如何编辑命名空间资源配额](../../../new-user-guides/manage-clusters/projects-and-namespaces.md)。
+
+### 编辑命名空间资源配额
+
+如果你已为项目配置了资源配额,你可以覆盖命名空间默认限制,从而为特定命名空间提供对更多(或更少)项目资源的访问权限:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要编辑命名空间资源配额的集群,然后单击 **Explore**。
+1. 单击**集群 > 项目/命名空间**。
+1. 找到要为其编辑资源配额的命名空间。单击 **⋮ > 编辑配置**。
+1. 编辑资源限制。这些限制决定了命名空间可用的资源。必须在项目限制范围内配置这些配额限制。
+
+ 有关每个**资源类型**的详细信息,请参阅[类型参考](resource-quota-types.md)。
+
+ :::note
+
+ - 如果没有为项目配置资源配额,这些选项将不可用。
+ - 如果你输入的限制超过了配置的项目限制,你将无法保存修改。
+
+ :::
+
+**结果**:覆盖设置已经应用到命名空间的资源配额。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md
new file mode 100644
index 00000000000..b0873befd7a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md
@@ -0,0 +1,27 @@
+---
+title: 资源配额类型参考
+---
+
+创建资源配额相当于配置项目可用的资源池。你可以为以下资源类型设置资源配额:
+
+| 资源类型 | 描述 |
+| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| CPU 限制\* | 分配给项目/命名空间的最大 CPU 量(以[毫核](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)为单位)1 |
+| CPU 预留\* | 预留给项目/命名空间的最小 CPU 量(以毫核为单位)1 |
+| 内存限制\* | 分配给项目/命名空间的最大内存量(以字节为单位)1 |
+| 内存预留\* | 预留给项目/命名空间的最小内存量(以字节为单位)1 |
+| 存储预留 | 预留给项目/命名空间的最小存储量(以千兆字节为单位) |
+| 服务负载均衡器 | 项目/命名空间中可以存在的负载均衡器服务的最大数量 |
+| 服务节点端口 | 项目/命名空间中可以存在的节点端口服务的最大数量 |
+| Pod | 可以在项目/命名空间中以非终端状态存在的 pod 的最大数量(即 `.status.phase in (Failed, Succeeded)` 等于 true 的 pod) |
+| Services | 项目/命名空间中可以存在的最大 service 数量 |
+| ConfigMap | 项目/命名空间中可以存在的 ConfigMap 的最大数量 |
+| 持久卷声明 | 项目/命名空间中可以存在的持久卷声明的最大数量 |
+| ReplicationController | 项目/命名空间中可以存在的最大 ReplicationController 数量 |
+| 密文 | 项目/命名空间中可以存在的最大密文数量 |
+
+:::note \*
+
+在设置资源配额时,如果你在项目或命名空间上设置了任何与 CPU 或内存相关的内容(即限制或预留),所有容器都需要在创建期间设置各自的 CPU 或内存字段。你可以同时设置容器的默认资源限制,以避免为每个工作负载显式设置这些限制。详情请参阅 [Kubernetes 文档](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits)。
+
+:::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
new file mode 100644
index 00000000000..e56f617e7a0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md
@@ -0,0 +1,40 @@
+---
+title: 设置容器默认资源限制
+---
+
+在设置资源配额时,如果你在项目或命名空间上设置了任何与 CPU 或内存相关的内容(即限制或预留),所有容器都需要在创建期间设置各自的 CPU 或内存字段。详情请参阅 [Kubernetes 文档](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits)。
+
+为了避免在创建工作负载期间对每个容器设置这些限制,可以在命名空间上指定一个默认的容器资源限制。
+
+### 编辑容器默认资源限制
+
+你可以在以下情况下编辑容器的默认资源限制:
+
+- 你在项目上设置了 CPU 或内存资源配额,现在需要为容器设置相应的默认值。
+- 你需要编辑容器的默认资源限制。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要编辑默认资源限制的集群,然后单击 **Explore**。
+1. 单击**集群 > 项目/命名空间**。
+1. 找到要编辑容器默认资源限制的项目。在该项目中选择 **⋮ > 编辑配置**。
+1. 展开**容器默认资源限制**并编辑对应的值。
+
+### 沿用资源限制
+
+在项目级别设置默认容器资源限制后,项目中所有新建的命名空间都会沿用这个资源限制参数。新设置的限制不会影响项目中现有的命名空间。你需要为项目中的现有命名空间手动设置默认容器资源限制,以便创建容器时能应用该限制。
+
+你可以为项目设置容器的默认资源限制并启动任何商店应用。
+
+在命名空间上配置容器默认资源限制后,在该命名空间中创建的任何容器都会沿用该默认值。你可以在工作负载创建期间覆盖这些限制/预留。
+
+### 容器资源配额类型
+
+可以配置以下资源限制:
+
+| 资源类型 | 描述 |
+| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| CPU 限制 | 分配给容器的最大 CPU 量(以[毫核](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)为单位)。 |
+| CPU 预留 | 保留给容器的最小 CPU 量(以毫核为单位)。 |
+| 内存限制 | 分配给容器的最大内存量(以字节为单位)。 |
+| 内存预留 | 保留给容器的最小内存量(以字节为单位)。 |
+| NVIDIA GPU 限制/预留 | 分配给容器的 GPU 数量。GPU 的限制和预留始终相同。 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
new file mode 100644
index 00000000000..1ce6b33f141
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/manage-projects/manage-projects.md
@@ -0,0 +1,41 @@
+---
+title: 项目管理
+---
+
+
+
+
+
+_项目_ 是 Rancher 中引入的对象,可帮助你更有组织地管理 Kubernetes 集群中的命名空间。你可以使用项目创建多租户集群,这种集群允许一组用户共享相同的底层资源来创建应用,而应用之间不会相互影响。
+
+在层次结构方面:
+
+- 集群包含项目
+- 项目包含命名空间
+
+在 Rancher 中,你可以使用项目将多个命名空间作为一个实体进行管理。在原生 Kubernetes(没有项目这个概念)中,RBAC 或集群资源等功能被分配给了各个命名空间。如果集群中的多个命名空间需要分配同样的访问权限,分配权限会变得非常繁琐。即使所有命名空间都需要相同的权限,也无法通过单次操作将这些权限应用于所有命名空间。你必须重复地将这些权限分配给每个命名空间。
+
+而 Rancher 通过引入项目的概念,允许你在项目级别应用资源和访问权限。然后,项目中的每个命名空间都会继承这些资源和策略。因此你只需将资源和策略分配给项目即可,不需要将它们分配给每个单独的命名空间。
+
+你可以使用项目执行以下操作:
+
+- [为用户分配一组命名空间的访问权限](../../new-user-guides/add-users-to-projects.md)
+- 为用户分配[项目中的特定角色](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。角色可以是所有者、成员、只读或[自定义](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md)
+- [设置资源配额](manage-project-resource-quotas/manage-project-resource-quotas.md)
+- [管理命名空间](../../new-user-guides/manage-namespaces.md)
+- [配置工具](../../../reference-guides/rancher-project-tools.md)
+- [配置 Pod 安全策略](manage-pod-security-policies.md)
+
+### 授权
+
+非管理员用户只有在[管理员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)、[集群所有者或成员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)或[项目所有者](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)将其添加到项目的**成员**选项卡后,才能获取项目的访问权限。
+
+创建项目的人自动成为[项目所有者](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#项目角色)。
+
+## 在项目之间切换
+
+要在项目之间切换,请使用导航栏中的下拉菜单。你也可以直接在导航栏中切换项目:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面,进入要切换项目的集群然后点击 **Explore**。
+1. 在顶部导航栏中,选择要打开的项目。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md
new file mode 100644
index 00000000000..e5a2deccf45
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md
@@ -0,0 +1,148 @@
+---
+title: 持久化 Grafana 仪表板
+---
+
+要在重启 Grafana 实例后保存 Grafana 仪表板,请将仪表板的配置 JSON 添加到 ConfigMap 中。ConfigMap 还支持使用基于 GitOps 或 CD 的方法来部署仪表板,从而让你对仪表板进行版本控制。
+
+- [创建持久化 Grafana 仪表板](#创建持久化-grafana-仪表板)
+- [已知问题](#已知问题)
+
+## 创建持久化 Grafana 仪表板
+
+
+
+
+:::note 先决条件:
+
+- 已安装 Monitoring 应用。
+- 要创建持久化仪表板,你必须在包含 Grafana 仪表板的项目或命名空间中至少具有**管理 ConfigMap** 的 Rancher RBAC 权限。这与 Monitoring Chart 公开的 `monitoring-dashboard-edit` 或 `monitoring-dashboard-admin` Kubernetes 原生 RBAC 角色对应。
+- 要查看指向外部监控 UI(包括 Grafana 仪表板)的链接,你至少需要一个 [project-member 角色](../../../integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#具有-rancher-权限的用户)。
+
+:::
+
+### 1. 获取要持久化的仪表板的 JSON 模型
+
+要创建持久化仪表板,你需要获取要持久化的仪表板的 JSON 模型。你可以使用预制仪表板或自行构建仪表板。
+
+要使用预制仪表板,请转到 [https://grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards),打开详细信息页面,然后单击 **Download JSON** 按钮来获取下一步所需的 JSON 模型。
+
+要使用你自己的仪表板:
+
+1. 点击链接打开 Grafana。在集群详细信息页面上,单击 **Monitoring**。
+1. 登录到 Grafana。请注意,Grafana 实例的默认 Admin 用户名和密码是 `admin/prom-operator`。你还可以在部署或升级 Chart 时替换凭证。
+
+ :::note
+
+   无论谁拥有密码,你都需要在部署了 Rancher Monitoring 的项目中至少具有**管理服务**或**查看监控**的权限才能访问 Grafana 实例。你还可以在部署或升级 Chart 时替换凭证。
+
+ :::
+
+1. 使用 Grafana UI 创建仪表板。完成后,单击顶部导航菜单中的齿轮图标转到仪表板设置页面。在左侧导航菜单中,单击 **JSON Model**。
+1. 复制出现的 JSON 数据结构。
+
+### 2. 使用 Grafana JSON 模型创建 ConfigMap
+
+在包含 Grafana 仪表板的命名空间中创建一个 ConfigMap(默认为 `cattle-dashboards`)。
+
+ConfigMap 与以下内容类似:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ grafana_dashboard: "1"
+  name: <仪表板名称>
+  namespace: cattle-dashboards # 如果不使用默认命名空间,则修改此值
+data:
+  <仪表板名称>.json: |-
+    <此处粘贴仪表板的 JSON 模型>
+```
+
+默认情况下,Grafana 配置为监控 `cattle-dashboards` 命名空间中带有 `grafana_dashboard` 标签的所有 ConfigMap。
+
+要让 Grafana 监控所有命名空间中的 ConfigMap,请参阅[本节](#为-grafana-仪表板-configmap-配置命名空间)。
+
+要在 Rancher UI 中创建 ConfigMap:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要可视化的集群,然后单击 **Explore**。
+1. 单击**更多资源 > 核心 > 配置映射**。
+1. 单击**创建**。
+1. 设置与上例类似的键值对。输入 `<仪表板名称>.json` 的值时,点击**从文件读取**并上传 JSON 数据模型。
+1. 单击**创建**。
+
+**结果**:创建 ConfigMap 后,即使 Grafana pod 重启了,ConfigMap 也能显示在 Grafana UI 上并持久化。
+
+无法在 Grafana UI 中删除或编辑使用 ConfigMap 持久化了的仪表板。
+
+如果你在 Grafana UI 中删除仪表板,你将看到 "Dashboard cannot be deleted because it was provisioned" 的错误消息。如需删除仪表板,你需要删除 ConfigMap。
+
+### 为 Grafana 仪表板 ConfigMap 配置命名空间
+
+要让 Grafana 监控所有命名空间中的 ConfigMap,请在 `rancher-monitoring` Helm chart 中指定以下值:
+
+```
+grafana.sidecar.dashboards.searchNamespace=ALL
+```
+
+请注意,Monitoring Chart 用于添加 Grafana 仪表板的 RBAC 角色仅能让用户将仪表板添加到定义在 `grafana.dashboards.namespace` 中的命名空间,默认为 `cattle-dashboards`。
+
+
+
+
+:::note 先决条件:
+
+- 已安装 Monitoring 应用。
+- 你必须具有 cluster-admin ClusterRole 权限。
+
+:::
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要在其中配置 Grafana 命名空间的集群,然后单击 **Explore**。
+1. 在左侧导航栏中,单击**监控**。
+1. 点击 **Grafana**。
+1. 登录到 Grafana。请注意,Grafana 实例的默认 Admin 用户名和密码是 `admin/prom-operator`。你还可以在部署或升级 Chart 时替换凭证。
+
+ :::note
+
+ 无论谁拥有密码,都需要 Rancher 的集群管理员权限才能访问 Grafana 实例。
+
+ :::
+
+1. 转到要进行持久化的仪表板。在顶部导航菜单中,通过单击齿轮图标转到仪表板设置。
+1. 在左侧导航菜单中,单击 **JSON Model**。
+1. 复制出现的 JSON 数据结构。
+1. 在 `cattle-dashboards` 命名空间中创建一个 ConfigMap。ConfigMap 需要有 `grafana_dashboard: "1"` 标签。将 JSON 粘贴到 ConfigMap 中,格式如下例所示:
+
+ ```yaml
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ labels:
+ grafana_dashboard: "1"
+     name: <仪表板名称>
+ namespace: cattle-dashboards
+ data:
+     <仪表板名称>.json: |-
+       <此处粘贴仪表板的 JSON 模型>
+ ```
+
+**结果**:创建 ConfigMap 后,即使 Grafana pod 重启了,ConfigMap 也能显示在 Grafana UI 上并持久化。
+
+无法在 Grafana UI 中删除使用 ConfigMap 持久化了的仪表板。如果你在 Grafana UI 中删除仪表板,你将看到 "Dashboard cannot be deleted because it was provisioned" 的错误消息。如需删除仪表板,你需要删除 ConfigMap。
+
+为防止在卸载 Monitoring v2 时删除持久化的仪表板,请将以下注释添加到 `cattle-dashboards` 命名空间:
+
+```
+helm.sh/resource-policy: "keep"
+```
+
+
+
+
+## 已知问题
+
+如果你的 Monitoring V2 版本是 v9.4.203 或更低版本,卸载 Monitoring chart 将同时删除 `cattle-dashboards` 命名空间,所有持久化的仪表板将被删除(除非命名空间带有注释 `helm.sh/resource-policy: "keep"`)。
+
+Rancher 2.5.8 发布的新 Monitoring Chart 中默认添加了该注释,但使用早期 Rancher 版本的用户仍需手动应用该注释。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
new file mode 100644
index 00000000000..5fa126a9089
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md
@@ -0,0 +1,39 @@
+---
+title: 自定义 Grafana 仪表板
+---
+
+在本文中,你将学习通过自定义 Grafana 仪表板来显示特定容器的指标。
+
+### 先决条件
+
+在自定义 Grafana 仪表板之前,你必须先安装 `rancher-monitoring` 应用。
+
+要查看指向外部监控 UI(包括 Grafana 仪表板)的链接,你至少需要一个 [project-member 角色](../../../integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#具有-rancher-权限的用户)。
+
+### 登录 Grafana
+
+1. 在 Rancher UI 中,转到要自定义的仪表板的集群。
+1. 在左侧导航栏中,单击**监控**。
+1. 单击 **Grafana**。Grafana 仪表板将在新选项卡中打开。
+1. 转到左下角的登录图标,然后单击 **Sign In**。
+1. 登录到 Grafana。Grafana 实例的默认 Admin 用户名和密码是 `admin/prom-operator`(无论谁拥有密码,都需要 Rancher 的集群管理员权限才能访问 Grafana 实例)。你还可以在部署或升级 Chart 时替换凭证。
+
+
+### 获取支持 Grafana 面板的 PromQL 查询
+
+对于任何面板,你可以单击标题并单击 **Explore** 以获取支持图形的 PromQL 查询。
+
+例如,如果要获取 Alertmanager 容器的 CPU 使用率,点击 **CPU Utilization > Inspect**。
+
+**Data** 选项卡将基础数据显示为时间序列,第一列是时间,第二列是 PromQL 查询结果。复制 PromQL 查询。
+
+```
+(1 - (avg(irate({__name__=~"node_cpu_seconds_total|windows_cpu_time_total",mode="idle"}[5m])))) * 100
+```
+
+然后,你可以在 Grafana 面板中修改查询,或使用该查询创建新的 Grafana 面板。
+
+参考:
+
+- [编辑面板的 Grafana 文档](https://grafana.com/docs/grafana/latest/panels-visualizations/configure-panel-options/#edit-a-panel)
+- [向仪表板添加面板的 Grafana 文档](https://grafana.com/docs/grafana/latest/panels-visualizations/panel-editor-overview)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
new file mode 100644
index 00000000000..a51a45041ec
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md
@@ -0,0 +1,19 @@
+---
+title: 调试高内存用量
+---
+
+Prometheus 中的每个时间序列都由其[指标名称](https://prometheus.io/docs/practices/naming/#metric-names)和称为[标签](https://prometheus.io/docs/practices/naming/#labels)的可选键值对唯一标识。
+
+标签允许过滤和聚合时间序列数据,但它们也使 Prometheus 收集的数据量成倍增加。
+
+每个时间序列都有一组定义的标签,Prometheus 为所有唯一的标签组合生成一个新的时间序列。如果一个指标附加了两个标签,则会为该指标生成两个时间序列。更改任何标签值,包括添加或删除标签,都会创建一个新的时间序列。
+
+Prometheus 经过了优化,可以存储基于索引的序列数据。它是为相对一致的时间序列数量和相对大量的样本而设计的,这些样本需要随时间从 exporter 处收集。
+
+但是,Prometheus 没有就快速变化的时间序列数量进行对应的优化。因此,如果你在创建和销毁了大量资源的集群(尤其是多租户集群)上安装 Monitoring,可能会出现内存使用量激增的情况。
+
+### 减少内存激增
+
+为了减少内存消耗,Prometheus 可以通过抓取更少的指标或在时间序列上添加更少的标签,从而存储更少的时间序列。要查看使用内存最多的序列,你可以查看 Prometheus UI 中的 TSDB(时序数据库)状态页面。
+
+分布式 Prometheus 解决方案(例如 [Thanos](https://thanos.io/) 和 [Cortex](https://cortexmetrics.io/))使用了一个替代架构,该架构部署多个小型 Prometheus 实例。如果使用 Thanos,每个 Prometheus 的指标都聚合到通用的 Thanos 部署中,然后再将这些指标导出到 S3 之类的持久存储。这种架构更加健康,能避免给单个 Prometheus 实例带来过多时间序列,同时还能在全局级别查询指标。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md
new file mode 100644
index 00000000000..cfb2ebdf338
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md
@@ -0,0 +1,75 @@
+---
+title: 启用 Monitoring
+---
+
+[管理员](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)或[集群所有者](../../new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#集群角色)可以通过配置 Rancher 来部署 Prometheus,从而监控 Kubernetes 集群。
+
+本文介绍如何使用新的 monitoring 应用在集群内启用监控和告警。
+
+不论是否使用 SSL,你都可以启用 monitoring。
+
+## 要求
+
+- 在每个节点上允许端口 9796 上的流量。Prometheus 将从这些端口抓取指标。
+ - 如果 [PushProx](../../../integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#pushprox) 被禁用(`ingressNginx.enabled` 设置为 `false`),或者你已经升级了安装了 Monitoring V1 的 Rancher 版本,你可能还需要为每个节点允许端口 10254 上的流量。
+- 确保你的集群满足资源要求。集群应至少有 1950Mi 可用内存、2700m CPU 和 50Gi 存储。有关资源限制和请求的详细信息,请参阅[配置资源限制和请求](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#配置资源限制和请求)。
+- 在使用 RancherOS 或 Flatcar Linux 节点的 RKE 集群上安装 Monitoring 时,请将 etcd 节点证书目录更改为 `/opt/rke/etc/kubernetes/ssl`。
+- 如果集群是使用 RKE CLI 配置的,而且地址设置为主机名而不是 IP 地址,请在安装的 Values 配置步骤中将 `rkeEtcd.clients.useLocalhost` 设置为 `true`。例如:
+
+```yaml
+rkeEtcd:
+ clients:
+ useLocalhost: true
+```
+
+:::note
+
+如果要设置 Alertmanager、Grafana 或 Ingress,必须通过 Helm chart 部署上的设置来完成。在部署之外创建 Ingress 可能会产生问题。
+
+:::
+
+## 设置资源限制和请求
+
+安装 `rancher-monitoring` 时可以配置资源请求和限制。要从 Rancher UI 配置 Prometheus 资源,请单击左上角的 **Apps > Monitoring**。
+
+有关默认限制的更多信息,请参阅[此页面](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#配置资源限制和请求)。
+
+## 安装 Monitoring 应用
+
+### 在不使用 SSL 的情况下启用 Monitoring
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 点击**集群工具**(左下角)。
+1. 单击 Monitoring 的**安装**。
+1. 可选:在 Values 步骤中为 Alerting、Prometheus 和 Grafana 自定义请求、限制等。如需帮助,请参阅[配置参考](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md)。
+
+**结果**:Monitoring 应用已部署到 `cattle-monitoring-system` 命名空间中。
+
+### 在使用 SSL 的情况下启用 Monitoring
+
+1. 按照[此页面](../../new-user-guides/kubernetes-resources-setup/secrets.md)上的步骤创建密文,以便将 SSL 用于告警。
+- 密文应该创建到 `cattle-monitoring-system` 命名空间中。如果它不存在,请先创建它。
+- 将 `ca`、`cert` 和 `key` 文件添加到密文中。
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要启用 Monitoring 以与 SSL 一起使用的集群,然后单击 **Explore**。
+1. 单击 **Apps > Charts**。
+1. 单击 **Monitoring**。
+1. 根据你是否已安装 Monitoring,单击**安装**或**更新**。
+1. 选中**在安装前自定义 Helm 选项**,然后单击**下一步**。
+1. 单击 **Alerting**。
+1. 在**补充密文**字段中,添加之前创建的密文。
+
+**结果**:Monitoring 应用已部署到 `cattle-monitoring-system` 命名空间中。
+
+[创建接收器](../../../reference-guides/monitoring-v2-configuration/receivers.md#在-rancher-ui-中创建接收器)时,启用 SSL 的接收器(例如电子邮件或 webhook)将具有 **SSL**,其中包含 **CA 文件路径**、**证书文件路径**和**密钥文件路径**字段。使用 `ca`、`cert` 和 `key` 的路径填写这些字段。路径的格式为 `/etc/alertmanager/secrets/name-of-file-in-secret`。
+
+例如,如果你使用以下键值对创建了一个密文:
+
+```yaml
+ca.crt=`base64-content`
+cert.pem=`base64-content`
+key.pfx=`base64-content`
+```
+
+则**证书文件路径**需要设为 `/etc/alertmanager/secrets/cert.pem`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/monitoring-alerting-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/monitoring-alerting-guides.md
new file mode 100644
index 00000000000..16a504ae226
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/monitoring-alerting-guides.md
@@ -0,0 +1,15 @@
+---
+title: Monitoring/Alerting 指南
+---
+
+
+
+
+
+- [启用 Monitoring](enable-monitoring.md)
+- [卸载 Monitoring](uninstall-monitoring.md)
+- [为工作负载设置 Monitoring](set-up-monitoring-for-workloads.md)
+- [自定义 Grafana 仪表板](customize-grafana-dashboard.md)
+- [持久化 Grafana 仪表板](create-persistent-grafana-dashboard.md)
+- [调试高内存用量](debug-high-memory-usage.md)
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md
new file mode 100644
index 00000000000..44d69d05f9e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md
@@ -0,0 +1,7 @@
+---
+title: 自定义 Grafana 仪表板
+---
+
+无论是用于 rancher-monitoring 还是 Prometheus Federator,Grafana 仪表板的定制方式都是相同的。
+
+有关说明,请参阅[此页面](../customize-grafana-dashboard.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
new file mode 100644
index 00000000000..c6bac839971
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md
@@ -0,0 +1,86 @@
+---
+title: 启用 Prometheus Federator
+---
+
+## 要求
+
+默认情况下,Prometheus Federator 已配置并旨在与 [rancher-monitoring](../../../../pages-for-subheaders/monitoring-and-alerting.md) 一起部署。rancher-monitoring 同时部署了 Prometheus Operator 和 Cluster Prometheus,每个项目监控堆栈(Project Monitoring Stack)默认会联合命名空间范围的指标。
+
+有关安装 rancher-monitoring 的说明,请参阅[此页面](../enable-monitoring.md)。
+
+默认配置与你的 rancher-monitoring 堆栈是兼容的。但是,为了提高集群中 Prometheus Federator 的安全性和可用性,我们建议对 rancher-monitoring 进行以下额外的配置:
+
+- [确保 cattle-monitoring-system 命名空间位于 System 项目中](#确保-cattle-monitoring-system-命名空间位于-system-项目中或者位于一个锁定并能访问集群中其他项目的项目中)
+- [将 rancher-monitoring 配置为仅监视 Helm Chart 创建的资源](#将-rancher-monitoring-配置为仅监视-helm-chart-创建的资源)
+- [提高 Cluster Prometheus 的 CPU/内存限制](#提高-cluster-prometheus-的-cpu内存限制)
+
+### 确保 cattle-monitoring-system 命名空间位于 System 项目中(或者位于一个锁定并能访问集群中其他项目的项目中)
+
+
+
+Prometheus Operator 的安全模型有一定的要求,即希望部署它的命名空间(例如,`cattle-monitoring-system`)对除集群管理员之外的任何用户都只有有限的访问权限,从而避免通过 Pod 内执行(例如正在执行的 Helm 操作的 Job)来提升权限。此外,如果将 Prometheus Federator 和所有 Project Prometheus 堆栈都部署到 System 项目中,即使网络策略是通过项目网络隔离定义的,每个 Project Prometheus 都依然能够在所有项目中抓取工作负载。它还为项目所有者、项目成员和其他用户提供有限的访问权限,从而确保这些用户无法访问他们不应访问的数据(例如,在 pod 中执行,在项目外部抓取命名空间数据等)。
+
+1. 打开 `System` 项目以检查你的命名空间:
+
+ 在 Rancher UI 中单击 **Cluster > Projects/Namespaces**。这将显示 `System` 项目中的所有命名空间:
+
+ 
+
+1. 如果你在 `cattle-monitoring-system` 命名空间中已经安装了一个 Monitoring V2,但该命名空间不在 `System` 项目中,你可以将 `cattle-monitoring-system` 命名空间移动到 `System` 项目或另一个访问受限的项目中。为此,你有以下两种方法:
+
+ - 将命名空间拖放到 `System` 项目。
+ - 选择命名空间右侧的 **⋮**,点击 **Move**,然后从 **Target Project** 下拉列表中选择 `System`:
+
+ 
+
+### 将 rancher-monitoring 配置为仅监视 Helm Chart 创建的资源
+
+每个项目监控堆栈都会监视其他命名空间并收集其他自定义工作负载指标或仪表板。因此,我们建议在所有选择器上配置以下设置,以确保 Cluster Prometheus 堆栈仅监控由 Helm Chart 创建的资源:
+
+```
+matchLabels:
+ release: "rancher-monitoring"
+```
+
+建议为以下选择器字段赋予此值:
+- `.Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector`
+- `.Values.prometheus.prometheusSpec.serviceMonitorSelector`
+- `.Values.prometheus.prometheusSpec.podMonitorSelector`
+- `.Values.prometheus.prometheusSpec.ruleSelector`
+- `.Values.prometheus.prometheusSpec.probeSelector`
+
+启用此设置后,你始终可以通过向它们添加 `release: "rancher-monitoring"` 标签来创建由 Cluster Prometheus 抓取的 ServiceMonitor 或 PodMonitor。在这种情况下,即使这些 ServiceMonitor 或 PodMonitor 所在的命名空间不是 System 命名空间,项目监控堆栈也会默认自动忽略它们。
+
+:::note
+
+如果你不希望用户能够在 Project 命名空间中创建聚合到 Cluster Prometheus 中的 ServiceMonitor 和 PodMonitor,你可以另外将 Chart 上的 namespaceSelectors 设置为仅目标 System 命名空间(必须包含 `cattle-monitoring-system` 和 `cattle-dashboards`,默认情况下资源通过 rancher-monitoring 部署到该命名空间中。你还需要监控 `default` 命名空间以获取 apiserver 指标,或创建自定义 ServiceMonitor 以抓取位于 `default` 命名空间中的 Service 的 apiserver 指标),从而限制你的 Cluster Prometheus 获取其他 Prometheus Operator CR。在这种情况下,建议设置 `.Values.prometheus.prometheusSpec.ignoreNamespaceSelectors=true`。这样,你可以定义能从 System 命名空间中监视非 System 命名空间的 ServiceMonitor。
+
+:::
+
+### 提高 Cluster Prometheus 的 CPU/内存限制
+
+根据集群的设置,我们一般建议为 Cluster Prometheus 配置大量的专用内存,以避免因内存不足的错误(OOMKilled)而重启。通常情况下,集群中创建的改动项(churn)会导致大量高基数指标在一个时间块内生成并被 Prometheus 引入,然后导致这些错误。这也是为什么默认的 Rancher Monitoring 堆栈希望能分配到大约 4GB 的 RAM 以在正常大小的集群中运行的原因之一。但是,如果你引入向同一个 Cluster Prometheus 发送 `/federate` 请求的项目监控堆栈,并且项目监控堆栈依赖于 Cluster Prometheus 的启动状态来在其命名空间上联合系统数据,那么你更加需要为 Cluster Prometheus 分配足够的 CPU/内存,以防止集群中的所有 Prometheus 项目出现数据间隙的中断。
+
+:::note
+
+我们没有 Cluster Prometheus 内存配置的具体建议,因为这完全取决于用户的设置(即遇到高改动率的可能性以及可能同时生成的指标的规模)。不同的设置通常有不同的推荐参数。
+
+:::
+
+## 安装 Prometheus Federator 应用程序
+
+1. 点击 **☰ > 集群管理**。
+1. 转到要安装 Prometheus Federator 的集群,然后单击 **Explore**。
+1. 点击**应用 > Charts**。
+1. 单击 **Prometheus Federator** Chart。
+1. 单击**安装**。
+1. 在**元数据**页面,点击**下一步**。
+1. 在**命名空间** > **项目 Release 命名空间项目 ID** 字段中,`System 项目`是默认值,但你可以使用具有类似[有限访问权限](#确保-cattle-monitoring-system-命名空间位于-system-项目中或者位于一个锁定并能访问集群中其他项目的项目中)的另一个项目覆盖它。你可以在 local 上游集群中运行以下命令来找到项目 ID:
+
+```plain
+kubectl get projects -A -o custom-columns="NAMESPACE":.metadata.namespace,"ID":.metadata.name,"NAME":.spec.displayName
+```
+
+1. 单击**安装**。
+
+**结果**:Prometheus Federator 应用程序已部署在 `cattle-monitoring-system` 命名空间中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md
new file mode 100644
index 00000000000..9dba3d6d401
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/project-monitors.md
@@ -0,0 +1,17 @@
+---
+title: 安装 Project Monitor
+---
+
+在要启用项目监控的各个项目中安装 **Project Monitor**。
+
+1. 点击 **☰ > 集群管理**。
+
+1. 在**集群**页面中,转到要启用监控的集群,然后单击 **Explore**。
+
+1. 单击左侧导航栏上的**监控 > Project Monitor**。然后,点击右上角的**创建**。
+
+ 
+
+1. 从下拉菜单中选择你的项目,然后再次单击**创建**。
+
+ 
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/prometheus-federator-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/prometheus-federator-guides.md
new file mode 100644
index 00000000000..7bc98a902e4
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/prometheus-federator-guides.md
@@ -0,0 +1,12 @@
+---
+title: Prometheus Federator 指南
+---
+
+
+
+
+
+- [启用 Prometheus Federator](enable-prometheus-federator.md)
+- [卸载 Prometheus Federator](uninstall-prometheus-federator.md)
+- [自定义 Grafana 仪表板](customize-grafana-dashboards.md)
+- [为工作负载设置 Prometheus Federator](set-up-workloads.md)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
new file mode 100644
index 00000000000..10aaf5d95b7
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads.md
@@ -0,0 +1,13 @@
+---
+title: 为工作负载设置 Prometheus Federator
+---
+
+### 显示工作负载的 CPU 和内存指标
+
+使用 Prometheus Federator 显示 CPU 和内存指标的方式与使用 rancher-monitoring 相同。有关说明,请参阅[此处](../set-up-monitoring-for-workloads.md#显示工作负载的-cpu-和内存指标)。
+
+### 设置 CPU 和内存之外的指标
+
+使用 Prometheus Federator 设置 CPU 和内存之外的指标与使用 rancher-monitoring 的方式相同。有关说明,请参阅[此处](../set-up-monitoring-for-workloads.md#设置-cpu-和内存之外的指标)。
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md
new file mode 100644
index 00000000000..c6b04a736bb
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md
@@ -0,0 +1,13 @@
+---
+title: 卸载 Prometheus Federator
+---
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,点击 **Apps**。
+1. 点击**已安装的应用**。
+1. 转到 `cattle-monitoring-system` 命名空间并选中 `prometheus-federator`。
+1. 单击**删除**。
+1. 确认**删除**。
+
+**结果**:已卸载 `prometheus-federator`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
new file mode 100644
index 00000000000..7ab5dd36f79
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md
@@ -0,0 +1,27 @@
+---
+title: 为工作负载设置 Monitoring
+---
+
+如果你只需要工作负载的 CPU 和内存时间序列,则不需要部署 ServiceMonitor 或 PodMonitor,因为 Monitoring 应用默认会收集资源使用情况的指标数据。
+
+设置工作负载监控的步骤取决于你是否需要基本指标(例如工作负载的 CPU 和内存),或者是否需要从工作负载中抓取自定义指标。
+
+如果你只需要工作负载的 CPU 和内存时间序列,则不需要部署 ServiceMonitor 或 PodMonitor,因为 Monitoring 应用默认会收集资源使用情况的指标数据。资源使用的时间序列数据在 Prometheus 的本地时间序列数据库中。
+
+Grafana 显示聚合数据,你也可以使用 PromQL 查询来查看单个工作负载的数据。进行 PromQL 查询后,你可以在 Prometheus UI 中单独执行查询并查看可视化的时间序列,你也可以使用查询来自定义显示工作负载指标的 Grafana 仪表板。有关工作负载指标的 PromQL 查询示例,请参阅[本节](../../../integrations-in-rancher/monitoring-and-alerting/promql-expressions.md#工作负载指标)。
+
+要为你的工作负载设置自定义指标,你需要设置一个 Exporter 并创建一个新的 ServiceMonitor 自定义资源,从而将 Prometheus 配置为从 Exporter 中抓取指标。
+
+### 显示工作负载的 CPU 和内存指标
+
+默认情况下,Monitoring 应用会抓取 CPU 和内存指标。
+
+要获取特定工作负载的细粒度信息,你可以自定义 Grafana 仪表板来显示该工作负载的指标。
+
+### 设置 CPU 和内存之外的指标
+
+对于自定义指标,你需要使用 Prometheus 支持的格式来公开应用上的指标。
+
+我们建议你创建一个新的 ServiceMonitor 自定义资源。创建此资源时,Prometheus 自定义资源将自动更新,以便将新的自定义指标端点包含在抓取配置中。然后 Prometheus 会开始从端点抓取指标。
+
+你还可以创建 PodMonitor 来公开自定义指标端点,但 ServiceMonitor 更适合大多数用例。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md
new file mode 100644
index 00000000000..06845311909
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md
@@ -0,0 +1,19 @@
+---
+title: 卸载 Monitoring
+---
+
+1. 点击 **☰ > 集群管理**。
+1. 选择你创建的集群,并点击 **Explore**。
+1. 在左侧导航栏中,点击 **Apps**。
+1. 点击**已安装的应用**。
+1. 转到 `cattle-monitoring-system` 命名空间并选中 `rancher-monitoring-crd` 和 `rancher-monitoring`。
+1. 单击**删除**。
+1. 确认**删除**。
+
+**结果**:已卸载 `rancher-monitoring`。
+
+:::note 持久化 Grafana 仪表板:
+
+如果你的 Monitoring V2 版本是 v9.4.203 或更低版本,卸载 Monitoring chart 将同时删除 cattle-dashboards 命名空间,所有持久化的仪表板将被删除(除非命名空间带有注释 `helm.sh/resource-policy: "keep"`)。Monitoring V2 v14.5.100+ 会默认添加此注释。但如果你的集群上安装了旧版本的 Monitoring Chart,你可以在卸载它之前手动将注释应用到 cattle-dashboards 命名空间。
+
+:::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
new file mode 100644
index 00000000000..549bc6fa852
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/advanced-configuration.md
@@ -0,0 +1,19 @@
+---
+title: 高级配置
+---
+
+
+
+
+
+### Alertmanager
+
+有关配置 Alertmanager 自定义资源的信息,请参阅[此页面](alertmanager.md)。
+
+### Prometheus
+
+有关配置 Prometheus 自定义资源的信息,请参阅[此页面](prometheus.md)。
+
+### PrometheusRules
+
+有关配置 PrometheusRules 自定义资源的信息,请参阅[此页面](prometheusrules.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md
new file mode 100644
index 00000000000..77851a3accb
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md
@@ -0,0 +1,43 @@
+---
+title: Alertmanager 配置
+---
+
+通常情况下,你不需要直接编辑 Alertmanager 自定义资源。对于大多数用例,只需要编辑接收器和路由即可配置通知。
+
+当路由和接收器更新时,Monitoring 应用将自动更新 Alertmanager 自定义资源来与这些更改保持一致。
+
+:::note
+
+本节参考假设你已经熟悉 Monitoring 组件的协同工作方式。有关 Alertmanager 的详细信息,请参阅[本节](../../../../integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#3-alertmanager-的工作原理)。
+
+:::
+
+## 关于 Alertmanager 自定义资源
+
+默认情况下,Rancher Monitoring 将单个 Alertmanager 部署到使用默认 Alertmanager Config Secret 的集群上。
+
+如果你想使用 Rancher UI 表单中未公开的高级选项(例如创建超过两层深的路由树结构),你可能需要编辑 Alertmanager 自定义资源。
+
+你也可以在集群中创建多个 Alertmanager 来实现命名空间范围的监控。在这种情况下,你应该使用相同的底层 Alertmanager Config Secret 来管理 Alertmanager 自定义资源。
+
+### 深度嵌套的路由
+
+虽然 Rancher UI 仅支持两层深度的路由树,但你可以通过编辑 Alertmanager YAML 来配置更深的嵌套路由结构。
+
+### 多个 Alertmanager 副本
+
+作为 Chart 部署选项的一部分,你可以选择增加部署到集群上的 Alertmanager 副本的数量。这些副本使用相同的底层 Alertmanager Config Secret 进行管理。
+
+此 Secret 可以按照你的需求随时更新或修改:
+
+- 添加新的通知程序或接收器
+- 更改应该发送给指定通知程序或接收器的告警
+- 更改发出的告警组
+
+默认情况下,你可以选择提供现有的 Alertmanager Config Secret(即 `cattle-monitoring-system` 命名空间中的任何 Secret),或允许 Rancher Monitoring 将默认的 Alertmanager Config Secret 部署到你的集群上。
+
+默认情况下,在升级或卸载 `rancher-monitoring` Chart 时,Rancher 创建的 Alertmanager Config Secret 不会被修改或删除。这个限制可以防止用户在 Chart 上执行操作时丢失或覆盖他们的告警配置。
+
+有关可以在 Alertmanager Config Secret 中指定的字段的更多信息,请查看 [Prometheus Alertmanager 文档](https://prometheus.io/docs/alerting/latest/alertmanager/)。
+
+你可以在[此处](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file)找到 Alertmanager 配置文件的完整规范及其内容。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md
new file mode 100644
index 00000000000..b83ad90b748
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md
@@ -0,0 +1,19 @@
+---
+title: Prometheus 配置
+---
+
+通常情况下,你不需要直接编辑 Prometheus 自定义资源,因为 Monitoring 应用会根据 ServiceMonitor 和 PodMonitor 的更改自动更新资源。
+
+:::note
+
+本节参考假设你已经熟悉 Monitoring 组件的协同工作方式。有关详细信息,请参阅[本节](../../../../integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md)。
+
+:::
+
+## 关于 Prometheus 自定义资源
+
+Prometheus CR 定义了所需的 Prometheus deployment。Prometheus Operator 会观察 Prometheus CR。当 CR 发生变化时,Prometheus Operator 会创建 `prometheus-rancher-monitoring-prometheus`,即根据 CR 配置的 Prometheus deployment。
+
+Prometheus CR 指定了详细信息,例如规则以及连接到 Prometheus 的 Alertmanager。Rancher 会为你构建这个 CR。
+
+Monitoring V2 仅支持每个集群一个 Prometheus。如果你想将监控限制到指定命名空间,你需要编辑 Prometheus CR。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
new file mode 100644
index 00000000000..458011a703c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md
@@ -0,0 +1,80 @@
+---
+title: PrometheusRule 配置
+---
+
+PrometheusRule 定义了一组 Prometheus 告警和/或记录规则。
+
+:::note
+
+本节参考假设你已经熟悉 Monitoring 组件的协同工作方式。有关详细信息,请参阅[本节](../../../../integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md)。
+
+:::
+
+### 在 Rancher UI 中创建 PrometheusRule
+
+:::note 先决条件:
+
+已安装 Monitoring 应用。
+
+:::
+
+要在 Rancher UI 中创建规则组:
+
+1. 转到要创建规则组的集群。单击**监控 > 高级选项**,然后单击 **PrometheusRules**。
+1. 单击**创建**。
+1. 输入**组名称**。
+1. 配置规则。在 Rancher 的 UI 中,规则组需要包含告警规则或记录规则,但不能同时包含两者。如需获取填写表单的帮助,请参阅下方的配置选项。
+1. 单击**创建**。
+
+**结果**:告警可以向接收器发送通知。
+
+### 关于 PrometheusRule 自定义资源
+
+当你定义规则时(在 PrometheusRule 资源的 RuleGroup 中声明),[规则本身的规范](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule)会包含标签,然后 Alertmanager 会使用这些标签来确定接收此告警的路由。例如,标签为 `team: front-end` 的告警将发送到与该标签匹配的所有路由。
+
+Prometheus 规则文件保存在 PrometheusRule 自定义资源中。PrometheusRule 支持定义一个或多个 RuleGroup。每个 RuleGroup 由一组 Rule 对象组成,每个 Rule 对象均能表示告警或记录规则,并具有以下字段:
+
+- 新告警或记录的名称
+- 新告警或记录的 PromQL 表达式
+- 用于标记告警或记录的标签(例如集群名称或严重性)
+- 对需要在告警通知上显示的其他重要信息进行编码的注释(例如摘要、描述、消息、Runbook URL 等)。记录规则不需要此字段。
+
+有关可以指定的字段的更多信息,请查看 [Prometheus Operator 规范](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec)。
+
+你可以使用 Prometheus 对象中的标签选择器字段 `ruleSelector` 来定义要挂载到 Prometheus 的规则文件。
+
+如需查看示例,请参阅 Prometheus 文档中的[记录规则](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)和[告警规则](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)。
+
+## 配置
+
+### 规则组
+
+| 字段 | 描述 |
+|-------|----------------|
+| 组名称 | 组的名称。在规则文件中必须是唯一的。 |
+| 覆盖组间隔 | 组中规则的评估时间间隔(单位:秒)。 |
+
+
+### 告警规则
+
+[告警规则](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)可以让你根据 PromQL(Prometheus 查询语言)表达式来定义告警条件,并将触发告警的通知发送到外部服务。
+
+| 字段 | 描述 |
+|-------|----------------|
+| 告警名称 | 告警的名称。必须是有效的标签值。 |
+| 告警触发等待时间 | 时长,以秒为单位。当告警触发时间到达该指定时长时,则视为触发。当告警未触发足够长的时间,则视为待处理。 |
+| PromQL 表达式 | 要评估的 PromQL 表达式。Prometheus 将在每个评估周期评估此 PromQL 表达式的当前值,并且所有生成的时间序列都将成为待处理/触发告警。有关详细信息,请参阅 [Prometheus 文档](https://prometheus.io/docs/prometheus/latest/querying/basics/)或我们的 [PromQL 表达式示例](../../../../integrations-in-rancher/monitoring-and-alerting/promql-expressions.md)。 |
+| Labels | 为每个告警添加或覆盖的标签。 |
+| 严重程度 | 启用后,标签会附加到告警或记录中,这些标签通过严重程度来标识告警/记录。 |
+| 严重程度 Label 值 | Critical,warning 或 none |
+| 注释 | 注释是一组信息标签,可用于存储更长的附加信息,例如告警描述或 Runbook 链接。[Runbook](https://en.wikipedia.org/wiki/Runbook) 是一组有关如何处理告警的文档。注释值可以是[模板化](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#templating)的。 |
+
+### 记录规则
+
+[记录规则](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules)允许你预先计算常用或计算量大的 PromQL(Prometheus 查询语言)表达式,并将其结果保存为一组新的时间序列。
+
+| 字段 | 描述 |
+|-------|----------------|
+| 时间序列名称 | 要输出的时间序列的名称。必须是有效的指标名称。 |
+| PromQL 表达式 | 要评估的 PromQL 表达式。Prometheus 将在每个评估周期评估此 PromQL 表达式的当前值,并且将结果记录为一组新的时间序列,其指标名称由“记录”指定。有关表达式的更多信息,请参阅 [Prometheus 文档](https://prometheus.io/docs/prometheus/latest/querying/basics/)或我们的 [PromQL 表达式示例](../../../../integrations-in-rancher/monitoring-and-alerting/promql-expressions.md)。 |
+| Labels | 在存储结果之前要添加或覆盖的标签。 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-v2-configuration-guides.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/monitoring-v2-configuration-guides.md
similarity index 68%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-v2-configuration-guides.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/monitoring-v2-configuration-guides.md
index cdc63d55f53..3b53fe7214f 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/monitoring-v2-configuration-guides.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/monitoring-v2-configuration-guides.md
@@ -2,13 +2,17 @@
title: 配置
---
+
+
+
+
本文介绍在 Rancher UI 中配置 Monitoring V2 的一些最重要选项。
有关为 Prometheus 配置自定义抓取目标和规则的信息,请参阅 [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) 的上游文档。Prometheus Operator [设计文档](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md)中解释了一些最重要的自定义资源。Prometheus Operator 文档还可以帮助你设置 RBAC、Thanos 或进行自定义配置。
## 设置资源限制和请求
-安装 `rancher-monitoring` 时可以配置 Monitoring 应用的资源请求和限制。有关默认限制的更多信息,请参阅[此页面](../reference-guides/monitoring-v2-configuration/helm-chart-options.md#配置资源限制和请求)。
+安装 `rancher-monitoring` 时可以配置 Monitoring 应用的资源请求和限制。有关默认限制的更多信息,请参阅[此页面](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#配置资源限制和请求)。
:::tip
@@ -22,14 +26,13 @@ title: 配置
相反,要让 Prometheus 抓取自定义指标,你只需创建一个新的 ServiceMonitor 或 PodMonitor 来将 Prometheus 配置为抓取其他指标。
-
### ServiceMonitor 和 PodMonitor 配置
-有关详细信息,请参阅[此页面](../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md)。
+有关详细信息,请参阅[此页面](../../../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md)。
### 高级 Prometheus 配置
-有关直接编辑 Prometheus 自定义资源(对高级用例可能有帮助)的更多信息,请参阅[此页面](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md)。
+有关直接编辑 Prometheus 自定义资源(对高级用例可能有帮助)的更多信息,请参阅[此页面](advanced-configuration/prometheus.md)。
## Alertmanager 配置
@@ -37,15 +40,16 @@ Alertmanager 自定义资源通常不需要直接编辑。在常见用例中,
路由和接收器是 Alertmanager 自定义资源配置的一部分。在 Rancher UI 中,路由(Route)和接收器(Receiver)并不是真正的自定义资源,而是 Prometheus Operator 用来将你的配置与 Alertmanager 自定义资源同步的伪自定义资源。当路由和接收器更新时,Monitoring 应用将自动更新 Alertmanager 来反映这些更改。
-对于一些高级用例,你可能需要直接配置 Alertmanager。有关详细信息,请参阅[此页面](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md)。
+对于一些高级用例,你可能需要直接配置 Alertmanager。有关详细信息,请参阅[此页面](advanced-configuration/alertmanager.md)。
### 接收器
-接收器(Receiver)用于设置通知。有关如何配置接收器的详细信息,请参阅[此页面](../reference-guides/monitoring-v2-configuration/receivers.md)。
+接收器(Receiver)用于设置通知。有关如何配置接收器的详细信息,请参阅[此页面](../../../reference-guides/monitoring-v2-configuration/receivers.md)。
+
### 路由
-路由(Route)在通知到达接收器之前过滤它们。每条路由都需要引用一个已经配置好的接收器。有关如何配置路由的详细信息,请参阅[此页面](../reference-guides/monitoring-v2-configuration/routes.md)。
+路由(Route)在通知到达接收器之前过滤它们。每条路由都需要引用一个已经配置好的接收器。有关如何配置路由的详细信息,请参阅[此页面](../../../reference-guides/monitoring-v2-configuration/routes.md)。
### 高级配置
-有关直接编辑 Alertmanager 自定义资源(对高级用例可能有帮助)的更多信息,请参阅[此页面](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md)。
\ No newline at end of file
+有关直接编辑 Alertmanager 自定义资源(对高级用例可能有帮助)的更多信息,请参阅[此页面](advanced-configuration/alertmanager.md)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
new file mode 100644
index 00000000000..7e43c7234b2
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/open-ports-with-firewalld.md
@@ -0,0 +1,107 @@
+---
+title: 使用 firewalld 打开端口
+---
+
+> 我们建议禁用 firewalld。如果你使用的是 Kubernetes 1.19 或更高版本,则必须关闭 firewalld。
+
+某些 [源自 RHEL](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) 的 Linux 发行版(包括 Oracle Linux)的默认防火墙规则可能会阻止与 Helm 的通信。
+
+例如,AWS 中的一个 Oracle Linux 镜像具有 REJECT 规则,这些规则会阻止 Helm 与 Tiller 通信:
+
+```
+Chain INPUT (policy ACCEPT)
+target prot opt source destination
+ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
+ACCEPT icmp -- anywhere anywhere
+ACCEPT all -- anywhere anywhere
+ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh
+REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
+
+Chain FORWARD (policy ACCEPT)
+target prot opt source destination
+REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
+
+Chain OUTPUT (policy ACCEPT)
+target prot opt source destination
+```
+
+你可运行以下命令检查默认防火墙规则:
+
+```
+sudo iptables --list
+```
+
+下文介绍如何使用 `firewalld`,将[防火墙端口规则](../../pages-for-subheaders/installation-requirements.md#端口要求)应用到高可用 Rancher Server 集群中的节点。
+
+## 先决条件
+
+安装 v7.x 或更高版本的 `firewalld`:
+
+```
+yum install firewalld
+systemctl start firewalld
+systemctl enable firewalld
+```
+
+## 应用防火墙端口规则
+
+在 Rancher 高可用安装中,Rancher Server 设置在三个节点上,三个节点均具有 Kubernetes 的所有三个角色(etcd、controlplane 和 worker)。如果你的 Rancher Server 节点同时具有这三个角色,请在每个节点上运行以下命令:
+
+```
+firewall-cmd --permanent --add-port=22/tcp
+firewall-cmd --permanent --add-port=80/tcp
+firewall-cmd --permanent --add-port=443/tcp
+firewall-cmd --permanent --add-port=2376/tcp
+firewall-cmd --permanent --add-port=2379/tcp
+firewall-cmd --permanent --add-port=2380/tcp
+firewall-cmd --permanent --add-port=6443/tcp
+firewall-cmd --permanent --add-port=8472/udp
+firewall-cmd --permanent --add-port=9099/tcp
+firewall-cmd --permanent --add-port=10250/tcp
+firewall-cmd --permanent --add-port=10254/tcp
+firewall-cmd --permanent --add-port=30000-32767/tcp
+firewall-cmd --permanent --add-port=30000-32767/udp
+```
+如果你的 Rancher Server 节点配置了单独的角色,请根据节点角色运行以下命令:
+
+```
+# 在 etcd 节点上运行以下命令:
+firewall-cmd --permanent --add-port=2376/tcp
+firewall-cmd --permanent --add-port=2379/tcp
+firewall-cmd --permanent --add-port=2380/tcp
+firewall-cmd --permanent --add-port=8472/udp
+firewall-cmd --permanent --add-port=9099/tcp
+firewall-cmd --permanent --add-port=10250/tcp
+
+# 在 controlplane 节点上运行以下命令:
+firewall-cmd --permanent --add-port=80/tcp
+firewall-cmd --permanent --add-port=443/tcp
+firewall-cmd --permanent --add-port=2376/tcp
+firewall-cmd --permanent --add-port=6443/tcp
+firewall-cmd --permanent --add-port=8472/udp
+firewall-cmd --permanent --add-port=9099/tcp
+firewall-cmd --permanent --add-port=10250/tcp
+firewall-cmd --permanent --add-port=10254/tcp
+firewall-cmd --permanent --add-port=30000-32767/tcp
+firewall-cmd --permanent --add-port=30000-32767/udp
+
+# 在 worker 节点上运行以下命令:
+firewall-cmd --permanent --add-port=22/tcp
+firewall-cmd --permanent --add-port=80/tcp
+firewall-cmd --permanent --add-port=443/tcp
+firewall-cmd --permanent --add-port=2376/tcp
+firewall-cmd --permanent --add-port=8472/udp
+firewall-cmd --permanent --add-port=9099/tcp
+firewall-cmd --permanent --add-port=10250/tcp
+firewall-cmd --permanent --add-port=10254/tcp
+firewall-cmd --permanent --add-port=30000-32767/tcp
+firewall-cmd --permanent --add-port=30000-32767/udp
+```
+
+在节点上运行 `firewall-cmd` 命令后,使用以下命令启用防火墙规则:
+
+```
+firewall-cmd --reload
+```
+
+**结果**:防火墙已更新,因此 Helm 可以与 Rancher Server 节点通信了。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
new file mode 100644
index 00000000000..70721fb7276
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/advanced-user-guides/tune-etcd-for-large-installs.md
@@ -0,0 +1,39 @@
+---
+title: 为大型安装进行 etcd 调优
+---
+
+当你运行具有 15 个或更多集群的大型 Rancher 安装时,我们建议你扩大 etcd 的默认 keyspace(默认为 2GB)。你最大可以将它设置为 8GB。此外,请确保主机有足够的 RAM 来保存整个数据集。如果需要增加这个值,你还需要同步增加主机的大小。如果你预计在垃圾回收间隔期间 Pod 的变化率很高,你也可以在较小的安装中调整 Keyspace 大小。
+
+Kubernetes 每隔五分钟会自动清理 etcd 数据集。在某些情况下(例如发生部署抖动),在垃圾回收发生并进行清理之前会有大量事件写入 etcd 并删除,从而导致 Keyspace 填满。如果你在 etcd 日志或 Kubernetes API Server 日志中看到 `mvcc: database space exceeded` 错误,你可以在 etcd 服务器上设置 [quota-backend-bytes](https://etcd.io/docs/v3.5/op-guide/maintenance/#space-quota) 来增加 Keyspace 的大小。
+
+### 示例:此 RKE cluster.yml 文件的代码片段将 Keyspace 的大小增加到 5GB
+
+```yaml
+# RKE cluster.yml
+---
+services:
+ etcd:
+ extra_args:
+ quota-backend-bytes: 5368709120
+```
+
+## 扩展 etcd 磁盘性能
+
+你可以参见 [etcd 文档](https://etcd.io/docs/v3.5/tuning/#disk)中的建议,了解如何调整主机上的磁盘优先级。
+
+此外,为了减少 etcd 磁盘上的 IO 争用,你可以为 data 和 wal 目录使用专用设备。etcd 最佳实践不建议配置 Mirror RAID(因为 etcd 在集群中的节点之间复制数据)。你可以使用 striping RAID 配置来增加可用的 IOPS。
+
+要在 RKE 集群中实现此解决方案,你需要在底层主机上为 `/var/lib/etcd/data` 和 `/var/lib/etcd/wal` 目录挂载并格式化磁盘。`etcd` 服务的 `extra_args` 指令中必须包含 `wal_dir` 目录。如果不指定 `wal_dir`,etcd 进程会尝试在权限不足的情况下操作底层的 `wal` 挂载。
+
+```yaml
+# RKE cluster.yml
+---
+services:
+ etcd:
+ extra_args:
+ data-dir: '/var/lib/rancher/etcd/data/'
+ wal-dir: '/var/lib/rancher/etcd/wal/wal_dir'
+ extra_binds:
+ - '/var/lib/etcd/data:/var/lib/rancher/etcd/data'
+ - '/var/lib/etcd/wal:/var/lib/rancher/etcd/wal'
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/add-users-to-projects.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/add-users-to-projects.md
new file mode 100644
index 00000000000..4f4f6712b0e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/add-users-to-projects.md
@@ -0,0 +1,62 @@
+---
+title: 添加项目成员
+---
+
+如果你想为用户提供集群内 _特定_ 项目和资源的访问权限,请为用户分配项目成员资格。
+
+你可以在创建项目时将成员添加到项目中,或将用户添加到现有项目中。
+
+:::tip
+
+如果你需要为用户提供对集群内 _所有_ 项目的访问权限,请参见[添加集群成员](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)。
+
+:::
+
+### 将成员添加到新项目
+
+你可以在创建项目时将成员添加到项目中(建议)。有关创建新项目的详细信息,请参阅[集群管理](../../how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces.md)。
+
+### 将成员添加到现有项目
+
+创建项目后,你可以将用户添加为项目成员,以便用户可以访问项目的资源:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要添加项目成员的集群,然后单击 **Explore**。
+1. 单击**集群 > 项目/命名空间**。
+1. 转到要添加成员的项目。在项目名称上方的**创建命名空间**按钮旁边,单击 **☰**。选择 **编辑配置**。
+1. 在**成员**选项卡中,单击**添加**。
+1. 搜索要添加到项目的用户或组。
+
+ 如果配置了外部身份验证:
+
+ - 在你键入时,Rancher 会从你的外部身份验证源返回用户。
+
+ - 你可以在下拉菜单中添加组,而不是单个用户。下拉列表仅会列出你(登录用户)所在的组。
+
+ :::note
+
+ 如果你以本地用户身份登录,外部用户不会显示在你的搜索结果中。
+
+ :::
+
+1. 分配用户或组的**项目**角色。
+
+ [什么是项目角色?](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)
+
+ :::note 注意事项:
+
+ - 如果用户分配到了项目的`所有者`或`成员`角色,用户会自动继承`命名空间创建`角色。然而,这个角色是 [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole),这表示角色的范围会延展到集群中的所有项目。因此,对于显式分配到了项目`所有者`或`成员`角色的用户来说,即使只有`只读`角色,这些用户也可以在分配给他们的其他项目中创建或删除命名空间。
+
+ - 默认情况下,Rancher 的`项目成员`角色继承自 `Kubernetes-edit` 角色,而`项目所有者`角色继承自 `Kubernetes-admin` 角色。因此,`项目成员`和`项目所有者`角色都能管理命名空间,包括创建和删除命名空间。
+
+ - 对于`自定义`角色,你可以修改可分配的角色列表。
+
+ - 要将角色添加到列表中,请[添加自定义角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md)。
+ - 要从列表中删除角色,请[锁定/解锁角色](../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md)。
+
+ :::
+
+**结果**:已将选中的用户添加到项目中。
+
+- 要撤销项目成员资格,请选择用户并单击**删除**。此操作会删除成员资格,而不会删除用户。
+- 要修改项目中的用户角色,请将其从项目中删除,然后使用修改后的角色重新添加用户。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
new file mode 100644
index 00000000000..94b0333f481
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/about-provisioning-drivers.md
@@ -0,0 +1,51 @@
+---
+title: 配置驱动
+---
+
+
+
+
+
+使用 Rancher 中的驱动,你可以管理可以使用哪些供应商来部署[托管的 Kubernetes 集群](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)或[云服务器节点](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md),以允许 Rancher 部署和管理 Kubernetes。
+
+### Rancher 驱动
+
+你可以启用或禁用 Rancher 中内置的驱动。如果 Rancher 尚未实现相关驱动,你可以添加自己的驱动。
+
+Rancher 中有两种类型的驱动:
+
+* [集群驱动](#集群驱动)
+* [主机驱动](#主机驱动)
+
+### 集群驱动
+
+集群驱动用于配置[托管的 Kubernetes 集群](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md),例如 GKE、EKS、AKS 等。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有处于 `active` 状态的集群驱动才会显示为创建托管 Kubernetes 集群的选项。默认情况下,Rancher 与几个现有的集群驱动打包在一起,但你也可以创建自定义集群驱动并添加到 Rancher。
+
+默认情况下,Rancher 已激活多个托管 Kubernetes 云提供商,包括:
+
+* [Amazon EKS](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/eks.md)
+* [Google GKE](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md)
+* [Azure AKS](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md)
+
+还有几个托管的 Kubernetes 云提供商是默认禁用的,但也打包在 Rancher 中:
+
+* [Alibaba ACK](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md)
+* [Huawei CCE](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md)
+* [Tencent](../../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md)
+
+### 主机驱动
+
+主机驱动用于配置主机,Rancher 使用这些主机启动和管理 Kubernetes 集群。主机驱动与 [Docker Machine 驱动](https://docs.docker.com/machine/drivers/)相同。创建主机模板时可以显示的主机驱动,是由主机驱动的状态定义的。只有 `active` 主机驱动将显示为创建节点模板的选项。默认情况下,Rancher 与许多现有的 Docker Machine 驱动打包在一起,但你也可以创建自定义主机驱动并添加到 Rancher。
+
+如果你不想向用户显示特定的主机驱动,则需要停用这些主机驱动。
+
+Rancher 支持几家主要的云提供商。默认情况下,这些主机驱动处于 active 状态并可供部署:
+
+* [Amazon EC2](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md)
+* [Azure](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md)
+* [Digital Ocean](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md)
+* [vSphere](../../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/vsphere.md)
+
+还有其他几个默认禁用的主机驱动,但打包在 Rancher 中:
+
+* [Harvester](../../../../integrations-in-rancher/harvester/overview.md#harvester-主机驱动) - 在 Rancher 2.6.1 中可用
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
new file mode 100644
index 00000000000..5f9c30227ba
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md
@@ -0,0 +1,42 @@
+---
+title: 集群驱动
+---
+
+集群驱动用于在[托管 Kubernetes 提供商](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)(例如 Google GKE)中创建集群。创建集群时可以显示的集群驱动,是由集群驱动的状态定义的。只有 `active` 集群驱动将作为创建集群的选项显示。默认情况下,Rancher 与多个现有的云提供商集群驱动打包在一起,但你也可以将自定义集群驱动添加到 Rancher。
+
+如果你不想向用户显示特定的集群驱动,你可以在 Rancher 中停用这些集群驱动,它们将不会作为创建集群的选项出现。
+
+### 管理集群驱动
+
+:::note 先决条件:
+
+要创建、编辑或删除集群驱动,你需要以下权限中的_一个_:
+
+- [管理员全局权限](../manage-role-based-access-control-rbac/global-permissions.md)
+- 分配了[管理集群驱动角色](../manage-role-based-access-control-rbac/global-permissions.md)的[自定义全局权限](../manage-role-based-access-control-rbac/global-permissions.md#自定义全局权限)。
+
+:::
+
+## 激活/停用集群驱动
+
+默认情况下,Rancher 仅激活主流的云提供商 Google GKE、Amazon EKS 和 Azure AKS 的驱动。如果要显示或隐藏驱动,你可以更改驱动的状态:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+
+2. 在左侧导航菜单中,单击**驱动**。
+
+3. 在**集群驱动**选项卡上,选择要激活或停用的驱动,然后单击 **⋮ > 激活** 或 **⋮ > 停用**。
+
+## 添加自定义集群驱动
+
+如果你想使用 Rancher 不支持开箱即用的集群驱动,你可以添加提供商的驱动,从而使用该驱动来创建 _托管_ Kubernetes 集群:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航菜单中,单击**驱动**。
+1. 在**集群驱动**选项卡上,单击**添加集群驱动**。
+1. 填写**添加集群驱动**表单。然后单击**创建**。
+
+
+### 开发自己的集群驱动
+
+如果要开发集群驱动并添加到 Rancher,请参考我们的[示例](https://github.com/rancher-plugins/kontainer-engine-driver-example)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
new file mode 100644
index 00000000000..bb49bcbe46a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md
@@ -0,0 +1,41 @@
+---
+title: 主机驱动
+---
+
+主机驱动用于配置主机,Rancher 使用这些主机启动和管理 Kubernetes 集群。主机驱动与 [Docker Machine 驱动](https://docs.docker.com/machine/drivers/)相同。创建主机模板时可以显示的主机驱动,是由主机驱动的状态定义的。只有 `active` 主机驱动将显示为创建节点模板的选项。默认情况下,Rancher 与许多现有的 Docker Machine 驱动打包在一起,但你也可以创建自定义主机驱动并添加到 Rancher。
+
+如果你不想向用户显示特定的主机驱动,则需要停用这些主机驱动。
+
+### 管理主机驱动
+
+:::note 先决条件:
+
+要创建、编辑或删除驱动,你需要以下权限中的_一个_:
+
+- [管理员全局权限](../manage-role-based-access-control-rbac/global-permissions.md)
+- 分配了[管理主机驱动角色](../manage-role-based-access-control-rbac/global-permissions.md)的[自定义全局权限](../manage-role-based-access-control-rbac/global-permissions.md#自定义全局权限)。
+
+:::
+
+## 激活/停用主机驱动
+
+默认情况下,Rancher 仅激活主流云提供商 Amazon EC2、Azure、DigitalOcean 和 vSphere 的驱动。如果要显示或隐藏驱动,你可以更改驱动的状态:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+
+2. 在左侧导航菜单中,单击**驱动**。
+
+3. 在**主机驱动**选项卡上,选择要激活或停用的驱动,然后单击 **⋮ > 激活** 或 **⋮ > 停用**。
+
+## 添加自定义主机驱动
+
+如果你想使用 Rancher 不支持开箱即用的主机驱动,你可以添加提供商的驱动,从而使用该驱动为你的 Kubernetes 集群创建节点模板并最终创建节点池:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航菜单中,单击**驱动**。
+1. 在**主机驱动**选项卡上,单击**添加主机驱动**。
+1. 填写**添加主机驱动**表单。然后单击**创建**。
+
+### 开发自己的主机驱动
+
+主机驱动使用 [Docker Machine](https://docs.docker.com/machine/) 来实现。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-rke1-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/about-rke1-templates.md
similarity index 54%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-rke1-templates.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/about-rke1-templates.md
index 3f4012ac5dd..de9bc25d31a 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/about-rke1-templates.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/about-rke1-templates.md
@@ -2,6 +2,10 @@
title: RKE 模板
---
+
+
+
+
RKE 模板旨在让 DevOps 和安全团队标准化和简化 Kubernetes 集群创建的流程。
RKE 的全称是 [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/),它是 Rancher 用来配置 Kubernetes 集群的工具。
@@ -14,7 +18,7 @@ RKE 模板有助于标准化这些配置。无论是使用 Rancher UI、Rancher
如果集群是使用 RKE 模板创建的,则不能让集群使用另一个 RKE 模板。你只能将集群更新为同一模板的新版本。
-你可以[将现有集群的配置保存为 RKE 模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#将现有集群转换为使用-rke-模板)。这样,只有模板更新后才能更改集群的设置。新模板还可用于启动新集群。
+你可以[将现有集群的配置保存为 RKE 模板](apply-templates.md#将现有集群转换为使用-rke-模板)。这样,只有模板更新后才能更改集群的设置。新模板还可用于启动新集群。
RKE 模板的核心功能允许 DevOps 和安全团队:
@@ -45,24 +49,24 @@ RKE 模板的[附加组件](#附加组件)的功能特别强大,因为它允
Rancher 配置的集群支持 RKE 模板。模板可用于配置自定义集群或由基础设施提供商启动的集群。
-RKE 模板用于定义 Kubernetes 和 Rancher 设置。节点模板负责配置节点。有关如何将 RKE 模板与硬件结合使用的参考,请参阅 [RKE 模板和硬件](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md)。
+RKE 模板用于定义 Kubernetes 和 Rancher 设置。节点模板负责配置节点。有关如何将 RKE 模板与硬件结合使用的参考,请参阅 [RKE 模板和硬件](infrastructure.md)。
可以从头开始创建 RKE 模板来预先定义集群配置。它们可以用于启动新集群,也可以从现有的 RKE 集群导出模板。
-现有集群的设置可以[保存为 RKE 模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#将现有集群转换为使用-rke-模板)。这会创建一个新模板并将集群设置绑定到该模板。这样,集群只有在[模板更新](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#更新模板)的情况下才能[使用新版本的模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#升级集群以使用新的模板修订版)进行升级。新模板也可以用来创建新集群。
+现有集群的设置可以[保存为 RKE 模板](apply-templates.md#将现有集群转换为使用-rke-模板)。这会创建一个新模板并将集群设置绑定到该模板。这样,集群只有在[模板更新](manage-rke1-templates.md#更新模板)的情况下才能[使用新版本的模板](manage-rke1-templates.md#升级集群以使用新的模板修订版)进行升级。新模板也可以用来创建新集群。
## 示例场景
如果一个组织同时拥有普通和高级 Rancher 用户,管理员可能希望为高级用户提供更多用于集群创建的选项,并限制普通用户的选项。
-这些[示例场景](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md)描述组织如何使用模板来标准化集群创建。
+这些[示例场景](example-use-cases.md)描述组织如何使用模板来标准化集群创建。
示例场景包括:
-- **强制执行模板**:如果希望所有 Rancher 配置的新集群都具有某些设置,管理员可能想要[为每个用户强制执行一项或多项模板设置](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#强制执行模板设置)。
-- **与不同的用户共享不同的模板**:管理员可以为[普通用户和高级用户提供不同的模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#普通用户和高级用户模板)。这样,普通用户会有更多限制选项,而高级用户在创建集群时可以使用更多选项。
-- **更新模板设置**:如果组织的安全和 DevOps 团队决定将最佳实践嵌入到新集群所需的设置中,这些最佳实践可能会随着时间而改变。如果最佳实践发生变化,[可以将模板更新为新版本](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#更新模板和集群),这样,使用模板创建的集群可以[升级到模板的新版本](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#升级集群以使用新的模板修订版)。
-- **共享模板的所有权**:当模板所有者不再想要维护模板或想要共享模板的所有权时,此方案描述了如何[共享模板所有权](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#允许其他用户控制和共享模板)。
+- **强制执行模板**:如果希望所有 Rancher 配置的新集群都具有某些设置,管理员可能想要[为每个用户强制执行一项或多项模板设置](example-use-cases.md#强制执行模板设置)。
+- **与不同的用户共享不同的模板**:管理员可以为[普通用户和高级用户提供不同的模板](example-use-cases.md#普通用户和高级用户模板)。这样,普通用户会有更多限制选项,而高级用户在创建集群时可以使用更多选项。
+- **更新模板设置**:如果组织的安全和 DevOps 团队决定将最佳实践嵌入到新集群所需的设置中,这些最佳实践可能会随着时间而改变。如果最佳实践发生变化,[可以将模板更新为新版本](example-use-cases.md#更新模板和集群),这样,使用模板创建的集群可以[升级到模板的新版本](manage-rke1-templates.md#升级集群以使用新的模板修订版)。
+- **共享模板的所有权**:当模板所有者不再想要维护模板或想要共享模板的所有权时,此方案描述了如何[共享模板所有权](example-use-cases.md#允许其他用户控制和共享模板)。
## 模板管理
@@ -78,34 +82,34 @@ RKE 模板更新通过修订系统处理。如果要更改或更新模板,请
本节中的文件解释了 RKE 模板管理的细节:
-- [获取创建模板的权限](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md)
-- [创建和修改模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md)
-- [强制执行模板设置](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md#强制新集群使用-rke-模板)
-- [覆盖模板设置](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md)
-- [与集群创建者共享模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#与特定用户或组共享模板)
-- [共享模板的所有权](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#共享模板所有权)
+- [获取创建模板的权限](creator-permissions.md)
+- [创建和修改模板](manage-rke1-templates.md)
+- [强制执行模板设置](enforce-templates.md#强制新集群使用-rke-模板)
+- [覆盖模板设置](override-template-settings.md)
+- [与集群创建者共享模板](access-or-share-templates.md#与特定用户或组共享模板)
+- [共享模板的所有权](access-or-share-templates.md#共享模板所有权)
-你可以参见此[模板的示例 YAML 文件](../reference-guides/rke1-template-example-yaml.md)作为参考。
+你可以参见此[模板的示例 YAML 文件](../../../../reference-guides/rke1-template-example-yaml.md)作为参考。
## 应用模板
-你可以使用你自己创建的模板来[创建集群](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#使用-rke-模板创建集群),也可以使用[与你共享的模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md)来创建集群。
+你可以使用你自己创建的模板来[创建集群](apply-templates.md#使用-rke-模板创建集群),也可以使用[与你共享的模板](access-or-share-templates.md)来创建集群。
-如果 RKE 模板所有者创建了模板的新版本,你可以[将你的集群升级到该版本](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#更新使用-rke-模板创建的集群)。
+如果 RKE 模板所有者创建了模板的新版本,你可以[将你的集群升级到该版本](apply-templates.md#更新使用-rke-模板创建的集群)。
可以从头开始创建 RKE 模板来预先定义集群配置。它们可以用于启动新集群,也可以从现有的 RKE 集群导出模板。
-你可以[将现有集群的配置保存为 RKE 模板](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#将现有集群转换为使用-rke-模板)。这样,只有模板更新后才能更改集群的设置。
+你可以[将现有集群的配置保存为 RKE 模板](apply-templates.md#将现有集群转换为使用-rke-模板)。这样,只有模板更新后才能更改集群的设置。
## 标准化硬件
-RKE 模板的目的是标准化 Kubernetes 和 Rancher 设置。如果你还想标准化你的基础设施,一个选择是将 RKE 模板与[其他工具](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md)一起使用。
+RKE 模板的目的是标准化 Kubernetes 和 Rancher 设置。如果你还想标准化你的基础设施,一个选择是将 RKE 模板与[其他工具](infrastructure.md)一起使用。
-另一种选择是使用包含节点池配置选项,但不强制执行配置的[集群模板](../how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates.md)。
+另一种选择是使用包含节点池配置选项,但不强制执行配置的[集群模板](../../manage-clusters/manage-cluster-templates.md)。
## YAML 定制
-如果将 RKE 模板定义为 YAML 文件,则可以修改此[示例 RKE 模板 YAML](../reference-guides/rke1-template-example-yaml.md)。RKE 模板中的 YAML 使用了 Rancher 在创建 RKE 集群时使用的相同自定义设置。但由于 YAML 要在 Rancher 配置的集群中使用,因此需要将 RKE 模板自定义项嵌套在 YAML 中的 `rancher_kubernetes_engine_config` 参数下。
+如果将 RKE 模板定义为 YAML 文件,则可以修改此[示例 RKE 模板 YAML](../../../../reference-guides/rke1-template-example-yaml.md)。RKE 模板中的 YAML 使用了 Rancher 在创建 RKE 集群时使用的相同自定义设置。但由于 YAML 要在 Rancher 配置的集群中使用,因此需要将 RKE 模板自定义项嵌套在 YAML 中的 `rancher_kubernetes_engine_config` 参数下。
RKE 文档也提供[注释的](https://rancher.com/docs/rke/latest/en/example-yamls/) `cluster.yml` 文件供你参考。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
new file mode 100644
index 00000000000..947f2a9784c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md
@@ -0,0 +1,64 @@
+---
+title: 访问和共享
+---
+
+如果你是 RKE 模板所有者,你可以将该模板共享给用户或用户组,然后他们可以使用该模板创建集群。
+
+由于 RKE 模板是专门与用户和组共享的,因此所有者可以与不同的用户共享不同的 RKE 模板。
+
+共享模板时,每个用户都可以拥有以下两个访问权限中的其中一个:
+
+- **所有者**:可以更新、删除和共享他们拥有的模板。所有者还可以与其他用户共享模板。
+- **用户**:可以使用模板创建集群。他们还可以将这些集群升级到同一模板的新版本。如果你将模板共享为**公开(只读)**,你的 Rancher 设置中的所有用户都拥有该模板的用户访问权限。
+
+如果你创建了一个模板,你将自动成为该模板的所有者。
+
+如果你想让其他用户更新该模板,你可以共享模板的所有权。有关所有者如何修改模板的详细信息,请参阅[修改模板文档](manage-rke1-templates.md)。
+
+共享模板的方式有如下几种:
+
+- 在模板创建期间将用户添加到新的 RKE 模板
+- 将用户添加到现有 RKE 模板
+- 公开 RKE 模板,并与 Rancher 设置中的所有用户共享
+- 与受信任修改模板的用户共享模板所有权
+
+### 与特定用户或组共享模板
+
+要允许用户或组使用你的模板创建集群,你可以为他们提供模板的基本**用户**访问权限。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在 **RKE1 配置**下,单击 **RKE 模板**。
+1. 转到要共享的模板,然后单击 **⋮ > 编辑**。
+1. 在**共享模板**中,单击**添加成员**。
+1. 在**名称**字段中搜索你要与之共享模板的用户或组。
+1. 选择**用户**访问类型。
+1. 单击**保存**。
+
+**结果**:用户或组可以使用模板创建集群。
+
+### 与所有用户共享模板
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
+1. 转到要共享的模板,然后单击 **⋮ > 编辑**。
+1. 在**共享模板**下,选中 **公开(只读)** 复选框。
+1. 单击**保存**。
+
+**结果**:Rancher 设置中的所有用户都可以使用该模板创建集群。
+
+### 共享模板所有权
+
+如果你是模板的创建者,你可能希望将维护和更新模板的责任委派给其他用户或组。
+
+在这种情况下,你可以为用户提供**所有者**访问权限,该权限允许其他用户更新、删除模板或与其他用户共享对模板的访问权限。
+
+要授予用户或组**所有者**权限:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在 **RKE1 配置**下,单击 **RKE 模板**。
+1. 转到要共享的 RKE 模板,然后单击 **⋮ > 编辑**。
+1. 在**共享模板**下,单击**添加成员**并在**名称**字段中搜索要与之共享模板的用户或组。
+1. 在**访问类型**字段中,单击**所有者**。
+1. 单击**保存**。
+
+**结果**:用户或组具有**所有者**访问类型,可以修改、共享或删除模板。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
new file mode 100644
index 00000000000..0bf174f1ab7
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md
@@ -0,0 +1,59 @@
+---
+title: 应用模板
+---
+
+你可以使用你自己创建的 RKE 模板来创建集群,也可以使用[与你共享的模板](access-or-share-templates.md)来创建集群。
+
+RKE 模板可以应用于新集群。
+
+你可以[将现有集群的配置保存为 RKE 模板](#将现有集群转换为使用-rke-模板)。这样,只有模板更新后才能更改集群的设置。
+
+你无法将集群更改为使用不同的 RKE 模板。你只能将集群更新为同一模板的新版本。
+
+
+### 使用 RKE 模板创建集群
+
+要使用 RKE 模板添加[由基础设施提供商托管](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的集群,请按照以下步骤操作:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,单击**创建**并选择基础设施提供商。
+1. 设置集群名称和节点模板详情。
+1. 要使用 RKE 模板,请在**集群选项**下,选中**使用现有 RKE 模板和修订版**复选框。
+1. 从下拉菜单中选择 RKE 模板和修订版。
+1. 可选:你可以编辑 RKE 模板所有者在创建模板时标记为**允许用户覆盖**的任何设置。如果你无法更改某些设置,则需要联系模板所有者以获取模板的新修订版。然后,你需要编辑集群来将其升级到新版本。
+1. 单击**创建**以启动集群。
+
+### 更新使用 RKE 模板创建的集群
+
+模板所有者创建 RKE 模板时,每个设置在 Rancher UI 中都有一个开关,指示用户是否可以覆盖该设置。
+
+- 如果某个设置允许用户覆盖,你可以通过[编辑集群](../../../../pages-for-subheaders/cluster-configuration.md)来更新集群中的设置。
+- 如果该开关处于关闭状态,则除非集群所有者创建了允许你覆盖这些设置的模板修订版,否则你无法更改这些设置。如果你无法更改某些设置,则需要联系模板所有者以获取模板的新修订版。
+
+如果集群是使用 RKE 模板创建的,你可以编辑集群,来将集群更新为模板的新版本。
+
+现有集群的设置可以[保存为 RKE 模板](#将现有集群转换为使用-rke-模板)。在这种情况下,你还可以编辑集群以将集群更新为模板的新版本。
+
+:::note
+
+你无法将集群更改为使用不同的 RKE 模板。你只能将集群更新为同一模板的新版本。
+
+:::
+
+### 将现有集群转换为使用 RKE 模板
+
+本节介绍如何使用现有集群创建 RKE 模板。
+
+除非你将现有集群的设置保存为 RKE 模板,否则 RKE 模板不能应用于现有集群。这将把集群的设置导出为新的 RKE 模板,并且将集群绑定到该模板。然后,只有[更新了模板](manage-rke1-templates.md#更新模板)并且集群升级到**使用更新版本的模板**时,集群才能改变。
+
+要将现有集群转换为使用 RKE 模板:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要转换为使用 RKE 模板的集群。单击 **⋮ > 保存为 RKE 模板**。
+1. 在出现的表单中输入模板的名称,然后单击**创建**。
+
+**结果**:
+
+- 创建了一个新的 RKE 模板。
+- 将集群转换为使用该新模板。
+- 可以[使用新模板创建新集群](#使用-rke-模板创建集群)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md
new file mode 100644
index 00000000000..7db812c38ef
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md
@@ -0,0 +1,57 @@
+---
+title: 模板创建者权限
+---
+
+管理员有创建 RKE 模板的权限,只有管理员可以将该权限授予其他用户。
+
+有关管理员权限的更多信息,请参阅[全局权限文档](../manage-role-based-access-control-rbac/global-permissions.md)。
+
+## 授予用户创建模板的权限
+
+只有具有**创建 RKE 模板**全局权限的用户才能创建模板。
+
+管理员拥有创建模板的全局权限,只有管理员才能将该权限授予其他用户。
+
+有关允许用户修改现有模板的信息,请参阅[共享模板](access-or-share-templates.md)。
+
+管理员可以通过两种方式授予用户创建 RKE 模板的权限:
+
+- 通过编辑[单个用户](#允许用户创建模板)的权限
+- 通过更改[新用户的默认权限](#默认允许新用户创建模板)
+
+### 允许用户创建模板
+
+管理员可以按照以下步骤将**创建 RKE 模板**角色单独授予给任何现有用户:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**用户**。
+1. 选择要编辑的用户,然后单击 **⋮ > 编辑配置**。
+1. 在**内置角色**中,选中**创建 RKE 集群模板**角色以及用户应具有的其他角色。你可能还需要选中**创建 RKE 模板修订版**复选框。
+1. 单击**保存**。
+
+**结果**:用户拥有创建 RKE 模板的权限。
+
+### 默认允许新用户创建模板
+
+管理员也可以按照以下步骤为所有新用户授予创建 RKE 模板的默认权限。这不会影响现有用户的权限。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 转到**创建 RKE 集群模板**角色,然后单击 **⋮ > 编辑配置**。
+1. 选择**是:新用户的默认角色**选项。
+1. 单击**保存**。
+1. 如果你希望新用户能够创建 RKE 模板修订,请将该角色设置为默认值。
+
+**结果**:在此 Rancher 安装中创建的任何新用户都可以创建 RKE 模板。现有用户将不会获得此权限。
+
+### 取消创建模板的权限
+
+管理员可以通过以下步骤删除用户创建模板的权限。请注意,无论是否选择了细粒度权限,管理员都可以完全控制所有资源。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**用户**。
+1. 选择要编辑权限的用户,然后单击 **⋮ > 编辑配置**。
+1. 在**内置角色**中,取消选中**创建 RKE 模板**和**创建 RKE 模板修订版**复选框(如果适用)。在此处,你可以将用户改回普通用户,或授予用户一组不同的权限。
+1. 单击**保存**。
+
+**结果**:用户无法创建 RKE 模板。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md
new file mode 100644
index 00000000000..38ab0f41ee8
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md
@@ -0,0 +1,43 @@
+---
+title: 强制使用模板
+---
+
+本节介绍模板管理员如何在 Rancher 中强制执行模板,从而限制用户在没有模板的情况下创建集群。
+
+默认情况下,Rancher 中的任何普通用户都可以创建集群。但当开启强制使用 RKE 模板时,有以下约束:
+
+- 只有管理员才能在没有模板的情况下创建集群。
+- 所有普通用户必须使用 RKE 模板来创建新集群。
+- 普通用户在不使用模板的情况下无法创建集群。
+
+只有管理员[授予权限](creator-permissions.md#允许用户创建模板)后,用户才能创建新模板。
+
+使用 RKE 模板创建集群后,集群创建者无法编辑模板中定义的设置。创建集群后更改这些设置的唯一方法是[将集群升级到相同模板的新修订版](apply-templates.md#更新使用-rke-模板创建的集群)。如果集群创建者想要更改模板定义的设置,他们需要联系模板所有者以获取模板的新版本。有关模板修订如何工作的详细信息,请参阅[修订模板](manage-rke1-templates.md#更新模板)。
+
+## 强制新集群使用 RKE 模板
+
+要求用户创建新集群时使用模板,可以确保[普通用户](../manage-role-based-access-control-rbac/global-permissions.md)启动的任何集群都使用经过管理员审核的 Kubernetes 和 Rancher 设置。
+
+管理员可以通过以下步骤启用 RKE 模板强制,从而要求用户必须使用模板创建集群:
+
+1. 单击 **☰ > 全局设置**。
+1. 转到 `cluster-template-enforcement` 设置。单击 **⋮ > 编辑设置**。
+1. 将值设置为 **True** 并单击**保存**。
+
+ :::note 重要提示:
+
+   如果管理员将 `cluster-template-enforcement` 设置为 True,还需要与用户共享 `clusterTemplates`,以便用户可以选择其中一个模板来创建集群。
+
+ :::
+
+**结果**:除非创建者是管理员,否则 Rancher 配置的所有集群都必须使用模板。
+
+## 禁用 RKE 模板强制
+
+管理员可以通过以下步骤关闭 RKE 模板强制,从而允许用户在没有 RKE 模板的情况下创建新集群:
+
+1. 单击 **☰ > 全局设置**。
+1. 转到 `cluster-template-enforcement` 设置。单击 **⋮ > 编辑设置**。
+1. 将值设置为 **False** 并单击**保存**。
+
+**结果**:在 Rancher 中创建集群时,用户不需要使用模板。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md
new file mode 100644
index 00000000000..7457df20253
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md
@@ -0,0 +1,70 @@
+---
+title: 示例场景
+---
+
+以下示例场景描述了组织如何使用模板来标准化集群创建。
+
+- **强制执行模板**:如果希望所有 Rancher 配置的新集群都具有某些设置,管理员可能想要[为每个用户强制执行一项或多项模板设置](#强制执行模板设置)。
+- **与不同的用户共享不同的模板**:管理员可以为[普通用户和高级用户提供不同的模板](#普通用户和高级用户模板)。这样,普通用户会有更多限制选项,而高级用户在创建集群时可以使用更多选项。
+- **更新模板设置**:如果组织的安全和 DevOps 团队决定将最佳实践嵌入到新集群所需的设置中,这些最佳实践可能会随着时间而改变。如果最佳实践发生变化,[可以将模板更新为新版本](#更新模板和集群)。这样,使用模板创建的集群可以升级到模板的新版本。
+- **共享模板的所有权**:当模板所有者不再想要维护模板或想要共享模板的所有权时,此方案描述了如何[授权模板所有权](#允许其他用户控制和共享模板)。
+
+
+## 强制执行模板设置
+
+假设一个组织的管理员决定用 Kubernetes 版本 1.14 创建所有新集群:
+
+1. 首先,管理员创建一个模板,将 Kubernetes 版本指定为 1.14,并将所有其他设置标记为**允许用户覆盖**。
+1. 管理员将模板公开。
+1. 管理员打开模板强制功能。
+
+**结果**:
+
+- 组织中的所有 Rancher 用户都可以访问该模板。
+- [普通用户](../manage-role-based-access-control-rbac/global-permissions.md)使用此模板创建的所有新集群都将使用 Kubernetes 1.14,它们无法使用其它 Kubernetes 版本。默认情况下,普通用户没有创建模板的权限。因此,除非与他们共享更多模板,否则此模板将是普通用户唯一可以使用的模板。
+- 所有普通用户都必须使用集群模板来创建新集群。他们无法在不使用模板的情况下创建集群。
+
+通过这种方式,管理员在整个组织中强制执行 Kubernetes 版本,同时仍然允许最终用户配置其他所有内容。
+
+## 普通用户和高级用户模板
+
+假设一个组织有普通用户和高级用户。管理员希望普通用户必须使用模板,而高级用户和管理员可以根据自己的需要创建集群。
+
+1. 首先,管理员开启 [RKE 模板强制执行](enforce-templates.md#强制新集群使用-rke-模板)。这意味着 Rancher 中的每个[普通用户](../manage-role-based-access-control-rbac/global-permissions.md)在创建集群时都需要使用 RKE 模板。
+1. 然后管理员创建两个模板:
+
+- 一个普通用户模板,该模板除了访问密钥外,几乎指定了所有选项
+- 一个高级用户模板,该模板具有大部分或所有已启用**允许用户覆盖**的选项
+
+1. 管理员仅与高级用户共享高级模板。
+1. 管理员将普通用户的模板公开,因此在 Rancher 中创建的 RKE 集群的每个人都能选择限制性更强的模板。
+
+**结果**:除管理员外,所有 Rancher 用户在创建集群时都需要使用模板。每个人都可以访问限制模板,但只有高级用户有权使用更宽松的模板。普通用户会受到更多限制,而高级用户在配置 Kubernetes 集群时有更多选择。
+
+## 更新模板和集群
+
+假设一个组织有一个模板,该模板要求集群使用 Kubernetes v1.14。然而,随着时间的推移,管理员改变了主意。管理员现在希望用户能够升级集群,以使用更新版本的 Kubernetes。
+
+在这个组织中,许多集群是用一个需要 Kubernetes v1.14 的模板创建的。由于模板不允许覆盖该设置,因此创建集群的用户无法直接编辑该设置。
+
+模板所有者可以有以下几个选项,来允许集群创建者在集群上升级 Kubernetes:
+
+- **在模板上指定 Kubernetes v1.15**:模板所有者可以创建指定 Kubernetes v1.15 的新模板修订版。然后使用该模板的每个集群的所有者可以将集群升级到模板的新版本。此模板升级允许集群创建者在集群上将 Kubernetes 升级到 v1.15。
+- **允许在模板上使用任何 Kubernetes 版本**:创建模板修订时,模板所有者还可以使用 Rancher UI 上该设置附近的开关,将 Kubernetes 版本标记为**允许用户覆盖**。该设置允许升级到此模板版本的集群使用任意 Kubernetes 的版本。
+- **允许在模板上使用最新的 Kubernetes 次要版本**:模板所有者还可以创建一个模板修订版,其中 Kubernetes 版本被定义为 **Latest v1.14(允许补丁版本升级)**。这意味着使用该版本的集群将能够进行补丁版本升级,但不支持主要版本升级。
+
+## 允许其他用户控制和共享模板
+
+假设 Alice 是 Rancher 管理员。她拥有一个 RKE 模板,该模板反映了她的组织为创建集群而商定的最佳实践。
+
+Bob 是一位高级用户,可以就集群配置做出明智的决策。随着最佳实践随着时间的推移不断更新,Alice 相信 Bob 会为她的模板创建新的修订。因此,她决定让 Bob 成为模板的所有者。
+
+为了与 Bob 共享模板的所有权,Alice [将 Bob 添加为模板的所有者](access-or-share-templates.md#共享模板所有权)。
+
+结果是,作为模板所有者,Bob 负责该模板的版本控制。Bob 现在可以执行以下所有操作:
+
+- 当最佳实践发生变化时[修改模板](manage-rke1-templates.md#更新模板)
+- [禁用模板的过时修订](manage-rke1-templates.md#禁用模板修订版),以禁止使用该模板来创建集群
+- 如果组织想要改变方向,则[删除整个模板](manage-rke1-templates.md#删除模板)
+- [将某个版本设置为默认值](manage-rke1-templates.md#将模板修订版设置为默认),用于用户创建集群。模板的最终用户仍然可以选择他们想要使用哪个版本来创建集群。
+- [与特定用户共享模板](access-or-share-templates.md),让所有 Rancher 用户都可以使用该模板,或与其他用户共享该模板的所有权。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
new file mode 100644
index 00000000000..1157400dd6a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md
@@ -0,0 +1,68 @@
+---
+title: RKE 模板和基础设施
+---
+
+在 Rancher 中,RKE 模板用于配置 Kubernetes 和定义 Rancher 设置,而节点模板则用于配置节点。
+
+因此,即使开启了 RKE 模板强制,最终用户在创建 Rancher 集群时仍然可以灵活选择底层硬件。RKE 模板的最终用户仍然可以选择基础设施提供商和他们想要使用的节点。
+
+如果要标准化集群中的硬件,请将 RKE 模板与节点模板或服务器配置工具 (如 Terraform) 结合使用。
+
+### 节点模板
+
+[节点模板](../../../../reference-guides/user-settings/manage-node-templates.md)负责 Rancher 中的节点配置和节点预配。你可以在用户配置文件中设置节点模板,从而定义在每个节点池中使用的模板。启用节点池后,可以确保每个节点池中都有所需数量的节点,并确保池中的所有节点都相同。
+
+### Terraform
+
+Terraform 是一个服务器配置工具。它使用基础架构即代码,支持使用 Terraform 配置文件创建几乎所有的基础设施。它可以自动执行服务器配置,这种方式是自文档化的,并且在版本控制中易于跟踪。
+
+本节重点介绍如何将 Terraform 与 [Rancher 2 Terraform Provider](https://www.terraform.io/docs/providers/rancher2/) 一起使用,这是标准化 Kubernetes 集群硬件的推荐选项。如果你使用 Rancher Terraform Provider 来配置硬件,然后使用 RKE 模板在该硬件上配置 Kubernetes 集群,你可以快速创建一个全面的、可用于生产的集群。
+
+Terraform 支持:
+
+- 定义几乎任何类型的基础架构即代码,包括服务器、数据库、负载均衡器、监控、防火墙设置和 SSL 证书
+- 跨多个平台(包括 Rancher 和主要云提供商)对基础设施进行编码
+- 将基础架构即代码提交到版本控制
+- 轻松重复使用基础设施的配置和设置
+- 将基础架构更改纳入标准开发实践
+- 防止由于配置偏移,导致一些服务器的配置与其他服务器不同
+
+## Terraform 工作原理
+
+Terraform 是用扩展名为 `.tf` 的文件编写的。它是用 HashiCorp 配置语言编写的。HashiCorp 配置语言是一种声明性语言,支持定义集群中所需的基础设施、正在使用的云提供商以及提供商的凭证。然后 Terraform 向提供商发出 API 调用,以便有效地创建基础设施。
+
+要使用 Terraform 创建 Rancher 配置的集群,请转到你的 Terraform 配置文件并将提供商定义为 Rancher 2。你可以使用 Rancher API 密钥设置你的 Rancher 2 提供商。请注意,API 密钥与其关联的用户具有相同的权限和访问级别。
+
+然后 Terraform 会调用 Rancher API 来配置你的基础设施,而 Rancher 调用基础设施提供商。例如,如果你想使用 Rancher 在 AWS 上预配基础设施,你需要在 Terraform 配置文件或环境变量中提供 Rancher API 密钥和 AWS 凭证,以便它们用于预配基础设施。
+
+如果你需要对基础设施进行更改,你可以在 Terraform 配置文件中进行更改,而不是手动更新服务器。然后,可以将这些文件提交给版本控制、验证,并根据需要进行检查。然后,当你运行 `terraform apply` 时,更改将会被部署。
+
+## 使用 Terraform 的技巧
+
+- [Rancher 2 提供商文档](https://www.terraform.io/docs/providers/rancher2/)提供了如何配置集群大部分的示例。
+
+- 在 Terraform 设置中,你可以使用 Docker Machine 主机驱动来安装 Docker Machine。
+
+- 可以在 Terraform Provider 中修改身份验证。
+
+- 可以通过更改 Rancher 中的设置,来反向工程如何在 Terraform 中定义设置,然后返回并检查 Terraform 状态文件,以查看该文件如何映射到基础设施的当前状态。
+
+- 如果你想在一个地方管理 Kubernetes 集群设置、Rancher 设置和硬件设置,请使用 [Terraform 模块](https://github.com/rancher/terraform-modules)。你可以将集群配置 YAML 文件或 RKE 模板配置文件传递给 Terraform 模块,以便 Terraform 模块创建它。在这种情况下,你可以使用基础架构即代码来管理 Kubernetes 集群及其底层硬件的版本控制和修订历史。
+
+## 创建符合 CIS 基准的集群的技巧
+
+本节描述了一种方法,可以使安全合规相关的配置文件成为集群的标准配置文件。
+
+在你创建[符合 CIS 基准的集群](../../../../pages-for-subheaders/rancher-security.md)时,你有一个加密配置文件和一个审计日志配置文件。
+
+你的基础设施预配系统可以将这些文件写入磁盘。然后在你的 RKE 模板中,你需要指定这些文件的位置,然后将你的加密配置文件和审计日志配置文件作为额外的挂载添加到 `kube-api-server`。
+
+然后,你需要确保 RKE 模板中的 `kube-api-server` 标志使用符合 CIS 的配置文件。
+
+通过这种方式,你可以创建符合 CIS 基准的标志。
+
+## 资源
+
+- [Terraform 文档](https://www.terraform.io/docs/)
+- [Rancher2 Terraform Provider 文档](https://www.terraform.io/docs/providers/rancher2/)
+- [The RanchCast - 第 1 集:Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8):在此演示中,社区主管 Jason van Brackel 使用 Rancher 2 Terraform Provider 创建了节点并创建自定义集群。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
new file mode 100644
index 00000000000..6e4adb98239
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md
@@ -0,0 +1,161 @@
+---
+title: 创建和修改 RKE 模板
+---
+
+本节介绍如何管理 RKE 模板和修订版。你可以从 **RKE1 配置 > RKE 模板**下的**集群管理**视图创建、共享、更新和删除模板。
+
+模板更新通过修订系统处理。当模板所有者想要更改或更新模板时,他们会创建模板的新版本。单个修订无法编辑。但是,如果你想防止使用修订来创建新集群,你可以禁用它。
+
+你可以使用两种方式来使用模板修订:创建新集群,或升级使用较早版本的模板创建的集群。模板创建者可以设置默认修订版,但是在最终用户创建集群时,他们可以选择任何模板以及可供使用的任何模板修订版。使用指定的修订版创建集群后,就无法将其更改为另一个模板,但是可以将集群升级为同一模板的较新的可用修订版。
+
+模板所有者对模板修订版具有完全控制权,并且可以创建新的修订版来更新模板,删除或禁用不应被用于创建集群的修订版,和设置默认的模板修订版。
+
+
+### 先决条件
+
+如果你具有**创建 RKE 模板**权限,则可以创建 RKE 模板,该权限可由[管理员授予](creator-permissions.md)。
+
+如果你是模板的所有者,你可以修改、共享和删除模板。有关如何成为模板所有者的详细信息,请参阅[共享模板所有权文档](access-or-share-templates.md#共享模板所有权)。
+
+### 创建模板
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 单击 **RKE1 配置 > RKE 模板**。
+1. 单击**添加模板**。
+1. 输入模板的名称。Rancher 已经为模板的第一个版本自动生成了名称,该版本与该模板一起创建。
+1. 可选:通过将用户添加为成员,来[与其他用户或组共享模板](access-or-share-templates.md#与特定用户或组共享模板)。你还可以将模板公开,从而与 Rancher 中的所有人共享。
+1. 然后按照屏幕上的表格将集群配置参数保存为模板修订的一部分。可以将修订标记为此模板的默认值。
+
+**结果**:配置了具有一个修订版的 RKE 模板。你可以稍后在[配置 Rancher 启动的集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)时使用此 RKE 模板修订版。通过 RKE 模板管理集群后,集群无法解除与模板的绑定,并且无法取消选中**使用现有 RKE 模板和修订版**。
+
+### 更新模板
+
+更新 RKE 模板相当于创建现有模板的修订版。使用旧版本模板创建的集群可以进行更新,从而匹配新版本。
+
+你不能编辑单个修订。由于你无法编辑模板的单个修订,为了防止使用某个修订,你可以[禁用该修订版](#禁用模板修订版)。
+
+创建新模板修订时,使用旧模板修订的集群不受影响。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
+1. 转到要编辑的模板,然后单击 **⋮ > 编辑**。
+1. 编辑所需信息并单击**保存**。
+1. 可选:你可以更改此模板的默认修订版,也可以更改共享对象。
+
+**结果**:模板已更新。要将其应用到使用旧版本模板的集群,请参阅[升级集群以使用新的模板修订版](#升级集群以使用新的模板修订版)。
+
+### 删除模板
+
+当不再需要为任何集群使用某个 RKE 模板时,可以将其删除。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 单击 **RKE1 配置 > RKE 模板**。
+1. 转到要删除的 RKE 模板,然后单击 **⋮ > 删除**。
+1. 确认删除。
+
+**结果**:模板被删除。
+
+### 基于默认版创建新修订版
+
+你可以复制默认模板修订版并快速更新其设置,而无需从头开始创建新修订版。克隆模板为你省去了重新输入集群创建所需的访问密钥和其他参数的麻烦。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
+1. 转到要克隆的 RKE 模板,然后单击 **⋮ > 基于默认版创建新修订版**。
+1. 填写表单的其余部分来创建新修订。
+
+**结果**:克隆并配置了 RKE 模板修订版。
+
+### 基于克隆版创建新修订版
+
+通过用户设置创建新的 RKE 模板修订版时,可以克隆现有修订版并快速更新其设置,而无需从头开始创建新的修订版。克隆模板修订省去了重新输入集群参数的麻烦。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在 **RKE1 配置**下,单击 **RKE 模板**。
+1. 转到要克隆的模板修订。然后选择 **⋮ > 克隆修订版**。
+1. 填写表单的其余部分。
+
+**结果**:克隆并配置了 RKE 模板修订版。你可以在配置集群时使用 RKE 模板修订。任何使用此 RKE 模板的现有集群都可以升级到此新版本。
+
+### 禁用模板修订版
+
+当你不需要将 RKE 模板修订版本用于创建新集群时,可以禁用模板修订版。你也可以重新启用禁用了的修订版。
+
+如果没有任何集群使用该修订,你可以禁用该修订。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
+1. 转到要禁用的模板修订版。然后选择 **⋮ > 禁用**。
+
+**结果**:RKE 模板修订版不能用于创建新集群。
+
+### 重新启用禁用的模板修订版
+
+如果要使用已禁用的 RKE 模板修订版来创建新集群,你可以重新启用该修订版。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在 **RKE1 配置**下,单击 **RKE 模板**。
+1. 转到要重新启用的模板修订。然后选择 **⋮ > 启用**。
+
+**结果**:RKE 模板修订版可用于创建新集群。
+
+### 将模板修订版设置为默认
+
+当最终用户使用 RKE 模板创建集群时,他们可以选择使用哪个版本来创建集群。你可以配置默认使用的版本。
+
+要将 RKE 模板修订版设置为默认:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
+1. 转到要设为默认的 RKE 模板修订版,然后单击 **⋮ > 设为默认配置**。
+
+**结果**:使用模板创建集群时,RKE 模板修订版将用作默认选项。
+
+### 删除模板修订版
+
+你可以删除模板的所有修订(默认修订除外)。
+
+要永久删除修订版:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏,单击 **RKE1 配置 > RKE 模板**。
+1. 转到要删除的 RKE 模板修订版,然后单击 **⋮ > 删除**。
+
+**结果**:RKE 模板修订版被删除。
+
+### 升级集群以使用新的模板修订版
+
+:::note
+
+本部分假设你已经有一个集群,该集群[应用了 RKE 模板](apply-templates.md)。
+
+本部分还假设你已[更新了集群使用的模板](#更新模板),以便可以使用新的模板修订版。
+
+:::
+
+要将集群升级到使用新的模板修订版:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 转到要升级的集群,然后单击 **⋮ > 编辑配置**。
+1. 在**集群选项**中,单击模板修订版的下拉菜单,然后选择新的模板修订版。
+1. 单击**保存**。
+
+**结果**:集群已升级为使用新模板修订版中定义的设置。
+
+### 将正在运行的集群导出到新的 RKE 模板和修订版
+
+你可以将现有集群的设置保存为 RKE 模板。
+
+这将把集群的设置导出为新的 RKE 模板,并且将集群绑定到该模板。然后,只有[更新了模板](#更新模板)并且集群升级到[使用更新版本的模板](#升级集群以使用新的模板修订版)时,集群才能改变。
+
+要将现有集群转换为使用 RKE 模板:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 转到将被转换为使用 RKE 模板的集群,然后 **⋮ > 保存为 RKE 模板**。
+1. 在出现的表单中输入 RKE 模板的名称,然后单击**创建**。
+
+**结果**:
+
+- 创建了一个新的 RKE 模板。
+- 将集群转换为使用该新模板。
+- 可以[使用新模板和修订版创建新集群。](apply-templates.md#使用-rke-模板创建集群)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md
new file mode 100644
index 00000000000..11ecac6fabd
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md
@@ -0,0 +1,14 @@
+---
+title: 覆盖模板设置
+---
+
+用户创建 RKE 模板时,模板中的每个设置在 Rancher UI 中都有一个开关,指示用户是否可以覆盖该设置。此开关将这些设置标记为**允许用户覆盖**。打开开关表示用户可以修改对应的参数,关闭开关表示用户无权修改对应的参数。
+
+使用模板创建集群后,除非模板所有者将设置标记为**允许用户覆盖**,否则最终用户无法更新模板中定义的任何设置。但是,如果模板[更新到新修订版](manage-rke1-templates.md),且该修订版更改了设置或允许最终用户更改设置,则集群可以升级到模板的新修订版,并且新修订版中的更改将应用于集群。
+
+如果 RKE 模板上的任何参数设置为**允许用户覆盖**,最终用户必须在集群创建期间设置这些字段,然后他们可以随时编辑这些设置。
+
+RKE 模板的**允许用户覆盖**选项的适用场景如下:
+
+- 管理员认为某些参数需要保持灵活性,以便随时更新。
+- 最终用户将需要输入他们自己的访问密钥或密文密钥,例如,云凭证或备份快照的凭证。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
new file mode 100644
index 00000000000..9d2a9590372
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/authentication-config.md
@@ -0,0 +1,145 @@
+---
+title: 配置认证
+weight: 10
+---
+
+
+
+
+
+Rancher 为 Kubernetes 添加的一个关键功能是集中式用户认证,这个特性允许用户使用一组凭证对任何 Kubernetes 集群进行身份认证。
+
+这种集中式用户认证是通过 Rancher 的认证代理完成的,该代理与 Rancher 的其余部分一并安装,此代理对用户进行认证并通过一个 Service Account 将请求转发到 Kubernetes 集群中。
+
+:::warning
+
+用来启用外部认证的账户将被授予管理员权限。如果你使用一个测试账号或非管理员账号,该账号仍然会被授予管理员级别权限。请查看[外部认证配置和主体用户](#外部认证配置和用户主体)了解原因。
+
+:::
+
+## 外部认证与本地认证
+
+Rancher 认证代理可以与以下外部认证服务集成。
+
+| 认证服务 |
+| ---------------------------------------------------------------------------------------------------------------------- |
+| [Microsoft Active Directory](configure-active-directory.md) |
+| [GitHub](configure-github.md) |
+| [Microsoft Azure AD](configure-azure-ad.md) |
+| [FreeIPA](configure-freeipa.md) |
+| [OpenLDAP](../configure-openldap/configure-openldap.md) |
+| [Microsoft AD FS](../configure-microsoft-ad-federation-service-saml/configure-microsoft-ad-federation-service-saml.md) |
+| [PingIdentity](configure-pingidentity.md) |
+| [Keycloak (OIDC)](configure-keycloak-oidc.md) |
+| [Keycloak (SAML)](configure-keycloak-saml.md) |
+| [Okta](configure-okta-saml.md) |
+| [Google OAuth](configure-google-oauth.md) |
+| [Shibboleth](../configure-shibboleth-saml/configure-shibboleth-saml.md) |
+
+当然,Rancher 也提供[本地认证](create-local-users.md)。
+
+在多数情况下,你应该使用外部认证服务而不是使用本地认证,因为外部认证服务可以集中式的对用户进行管理。但是在极少数情况下,例如外部认证服务不可用或正在维护时,你可能需要使用本地认证用户来管理 Rancher。
+
+## 用户和组
+
+Rancher 依赖用户和组来决定允许谁登录 Rancher 以及他们可以访问哪些资源。当使用外部认证时,外部认证系统会根据用户提供组的信息。这些用户和组被赋予了集群、项目及全局 DNS 提供商和条目等资源的特定角色。当你对组进行授权时,在认证服务中所有属于这个组中的用户都有访问指定的资源的权限。有关角色和权限的更多信息,请查看 [RBAC](../manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
+
+:::note
+
+本地认证不支持创建或管理组。
+
+:::
+
+更多信息,请查看[用户和组](manage-users-and-groups.md)。
+
+## Rancher 授权范围
+
+当你配置完 Rancher 使用外部认证服务后,你可以配置允许谁登录和使用 Rancher,包含如下的选项:
+
+| 访问级别 | 描述 |
+| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 允许任何有效用户 | 在认证服务中的*任何*用户都可以访问 Rancher。通常情况下不建议使用该设置! |
+| 允许集群和项目成员,以及授权的用户和组织 | 认证服务中属于**集群成员**或**项目成员**的用户或组成员都可以登录 Rancher。此外添加在**授权的用户和组织**列表中的用户和组成员也可以登录到 Rancher。 |
+| 仅限于授权的用户可以访问 | 仅有在授权用户和组织列表中的用户和组成员可以登录到 Rancher。 |
+
+要在授权服务中为用户设置 Rancher 访问级别,请执行以下步骤:
+
+1. 在左上角,点击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,点击 **认证**。
+1. 设置完外部认证详细信息后,使用 **站点访问** 选项配置用户权限范围,上面的表格说明了每个选项的访问级别。
+1. 可选:如果你选择 **允许任何有效用户** 以外的选项,你可以通过在出现的文本框中搜索用户,将用户添加到授权用户和组织的列表中。
+1. 点击 **保存**。
+
+**结果:** Rancher 的访问配置被应用。
+
+:::note SAML 认证警告:
+
+- SAML 协议不支持搜索或查找用户或组。因此,将用户或组添加到 Rancher 时不会对其进行验证。
+- 添加用户时,必须正确输入确切的用户 ID(即 UID 字段)。键入用户 ID 时,将不会搜索可能匹配的其他用户 ID。
+- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
+- 用户组下拉列表仅显示你所属的用户组。你将无法添加你不是其成员的组。
+
+:::
+
+## 外部认证配置和用户主体
+
+配置外部认证需要:
+
+- 分配了管理员角色的本地用户,以下称为 _本地主体_。
+- 可以使用外部认证服务进行认证的外部用户,以下简称为 _外部主体_。
+
+外部认证的配置也会影响 Rancher 中主体用户的管理方式,具体地说,当用户账户启用了外部认证时,将授予其管理员级别的权限。这是因为本地主体和外部主体共享相同的用户 ID 和访问权限。
+
+以下说明演示了这些效果:
+
+1. 作为本地主体登录到 Rancher 并完成外部身份验证的配置。
+
+ 
+
+2. Rancher 将外部主体与本地主体相关联。这两个用户共享本地主体的用户 ID。
+
+ 
+
+3. 完成配置后,Rancher 将自动退出本地主体。
+
+ 
+
+4. 然后,Rancher 会自动将您登录外部主体。
+
+ 
+
+5. 因为外部主体和本地主体共享一个 ID,所以用户列中不会再单独显示一个另外的外部主体的对象。
+
+ 
+
+6. 外部主体和本地主体共享相同的访问权限。
+
+:::note 重新配置先前设置的认证
+
+如果需要重新配置或禁用后重新启用先前设置过的认证,请确保尝试这样做的用户以外部用户身份登录到 Rancher,而不是使用本地管理员登录。
+
+:::
+
+## 禁用认证
+
+当你禁用认证时,Rancher 会删除所有与之关联的资源,例如:
+
+- 密文
+- 绑定的全局角色。
+- 绑定的集群角色。
+- 绑定的项目角色。
+- 与外部认证关联但从未以本地用户身份登录 Rancher 的外部用户。
+
+由于此操作可能会导致许多资源丢失,因此你可能需要添加一些保护措施。若要确保禁用外部认证时不执行清理流程,需要为外部认证的配置添加特殊的注释。
+
+例如,若要对 Azure AD 认证增加保护措施,你需要在 authconfig 对象上增加 `azuread` 注释:
+
+`kubectl annotate --overwrite authconfig azuread management.cattle.io/auth-provider-cleanup='user-locked'`
+
+禁用 Azure AD 认证后,Rancher 不会执行清理流程,直到你将该注解设置为 `unlocked`。
+
+### 手动运行资源清理
+
+Rancher 可能会在本地集群中保留之前禁用的外部认证配置的资源,即使你配置对接了另一种认证也是如此。例如,如果你对接了 A 认证,然后禁用它,并重新对接使用 B 认证,当你升级到新版本的 Rancher 时,你可以手动触发对认证 A 配置的资源清理。
+
+要手动触发已禁用的认证配置的清理,请将 `unlocked` 值添加到对应认证配置的 `management.cattle.io/auth-provider-cleanup` 注解中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
new file mode 100644
index 00000000000..704c47a68c5
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md
@@ -0,0 +1,219 @@
+---
+title: 配置 Active Directory (AD)
+---
+
+如果你的组织使用 Microsoft Active Directory 作为中心用户仓库,你可以将 Rancher 配置为与 Active Directory 服务器通信,从而对用户进行身份验证。这使 Rancher 管理员可以对外部用户系统中的用户和组进行集群和项目的访问控制,同时允许最终用户在登录 Rancher UI 时使用 Active Directory 凭证进行身份验证。
+
+Rancher 使用 LDAP 与 Active Directory 服务器通信。因此,Active Directory 与 [OpenLDAP 身份验证](../../../../pages-for-subheaders/configure-openldap.md)的流程相同。
+
+:::note
+
+在开始之前,请熟悉[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)的概念。
+
+:::
+
+## 先决条件
+
+你需要创建或从你的 AD 管理员处获取一个新的 AD 用户,用作 Rancher 的 ServiceAccount。此用户必须具有足够的权限,才能执行 LDAP 搜索并读取你的 AD 域下的用户和组的属性。
+
+通常可以使用(非管理员)**域用户**账号,因为默认情况下,该用户对域分区中的大多数对象具有只读特权。
+
+但是,请注意,在某些锁定的 Active Directory 配置中,此默认操作可能不适用。在这种情况下,你需要确保 ServiceAccount 用户在 Base OU(包含用户和组)上或全局范围内至少拥有域的 **Read** 和 **List Content** 权限。
+
+:::note 使用 TLS?
+
+- 如果 AD 服务器使用的证书是自签名的或不是来自认可的证书颁发机构,请确保手头有 PEM 格式的 CA 证书(包含所有中间证书)。你必须在配置期间粘贴此证书,以便 Rancher 能够验证证书链。
+
+- 升级到 v2.6.0 后,如果 AD 服务器上的证书不支持 SAN 属性,则使用 TLS 通过 Rancher 对 Active Directory 进行身份验证可能会失败。这是 Go v1.15 中默认启用的检查。
+
+  - 你可能会收到如下错误:`Error creating SSL connection: LDAP Result Code 200 "Network Error": x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0`。该错误表示证书依赖于旧的通用名称(Common Name)字段,需要使用 SAN,或通过 `GODEBUG=x509ignoreCN=0` 临时启用通用名称匹配。
+
+ - 要解决此错误,请使用支持 SAN 属性的新证书更新或替换 AD 服务器上的证书。或者,将 `GODEBUG=x509ignoreCN=0` 设置为 Rancher Server 容器的环境变量来忽略此错误。
+
+:::
+
+## 配置步骤
+### 打开 Active Directory 配置
+
+1. 使用初始的本地 `admin` 账号登录到 Rancher UI。
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **ActiveDirectory**。然后会显示**验证提供程序:ActiveDirectory** 的表单。
+1. 填写表单。如果需要获取帮助,请参见下方的配置选项详情。
+1. 点击**启用**。
+
+### 配置 Active Directory 服务器
+
+在 `1. 配置 Active Directory 服务器` 的步骤中,使用你 Active Directory 的实际信息完成字段配置。有关每个参数所需值的详细信息,请参阅下表。
+
+:::note
+
+如果你不确定要在用户/组`搜索库`字段中输入什么值,请参见[使用 ldapsearch 确定搜索库和 Schema](#附录使用-ldapsearch-确定搜索库和-schema)。
+
+:::
+
+**表 1:AD 服务器参数**
+
+| 参数 | 描述 |
+|:--|:--|
+| 主机名 | 指定 AD 服务器的主机名或 IP 地址。 |
+| 端口 | 指定 AD 服务器监听连接的端口。未加密的 LDAP 通常使用 389 的标准端口,而 LDAPS 使用 636 端口。 |
+| TLS | 选中此框可启用 SSL/TLS 上的 LDAP(通常称为 LDAPS)。 |
+| 服务器连接超时 | Rancher 在认为无法访问 AD 服务器之前等待的时间(秒)。 |
+| ServiceAccount 用户名 | 输入对域分区具有只读访问权限的 AD 账号的用户名(参见[先决条件](#先决条件))。用户名可以用 NetBIOS(例如 "DOMAIN\serviceaccount")或 UPN 格式(例如 "serviceaccount@domain.com")。 |
+| ServiceAccount 密码 | ServiceAccount 的密码。 |
+| 默认登录域 | 如果你使用 AD 域的 NetBIOS 名称配置此字段,在绑定到 AD 服务器时,没有包含域的用户名(例如“jdoe”)将自动转换为带斜杠的 NetBIOS 登录(例如,“LOGIN_DOMAIN\jdoe”)。如果你的用户以 UPN(例如,"jdoe@acme.com")作为用户名进行身份验证,则此字段**必须**留空。 |
+| 用户搜索库 | 输入目录树中开始搜索用户对象的节点的标识名称(DN)。所有用户都必须是此基础标识名称的后代。例如,"ou=people,dc=acme,dc=com"。 |
+| 组搜索库 | 如果组位于`用户搜索库`下配置的节点之外的其他节点下,则需要在此处提供标识名称。否则请留空。例如:"ou=groups,dc=acme,dc=com"。 |
+
+---
+
+### 配置用户/组 Schema
+
+在 `2. 自定义 Schema` 中,你必须为 Rancher 提供与目录中使用的 Schema 对应的用户和组属性的正确映射。
+
+Rancher 使用 LDAP 查询来搜索和检索关于 Active Directory 中的用户和组的信息。本节中配置的属性映射用于构造搜索筛选器和解析组成员身份。因此,所提供的设置需要反映你 AD 域的真实情况。
+
+:::note
+
+如果你不熟悉 Active Directory 域中使用的 Schema,请参见[使用 ldapsearch 确定搜索库和 Schema](#附录使用-ldapsearch-确定搜索库和-schema) 来确定正确的配置值。
+
+:::
+
+#### 用户 Schema
+
+下表详细说明了用户 Schema 配置的参数。
+
+**表 2:用户 Schema 配置参数**
+
+| 参数 | 描述 |
+|:--|:--|
+| Object Class | 域中用于用户对象的对象类别名称。如果定义了此参数,则仅指定对象类别的名称 - *请勿*将其放在 LDAP 包装器中,例如 `&(objectClass=xxxx)`。 |
+| Username Attribute | 用户属性的值适合作为显示名称。 |
+| Login Attribute | 登录属性的值与用户登录 Rancher 时输入的凭证的用户名部分匹配。如果你的用户以他的 UPN(例如 "jdoe@acme.com")作为用户名进行身份验证,则此字段通常必须设置为 `userPrincipalName`。否则,对于旧的 NetBIOS 风格的登录名(例如 "jdoe"),则通常设为 `sAMAccountName`。 |
+| User Member Attribute | 包含用户所属组的属性。 |
+| Search Attribute | 当用户输入文本以在用户界面中添加用户或组时,Rancher 会查询 AD 服务器,并尝试根据此设置中提供的属性匹配用户。可以通过使用管道(“\|”)符号分隔属性来指定多个属性。要匹配 UPN 用户名(例如 jdoe@acme.com),通常应将此字段的值设置为 `userPrincipalName`。 |
+| Search Filter | 当 Rancher 尝试将用户添加到网站访问列表,或尝试将成员添加到集群或项目时,此筛选器将应用于搜索的用户列表。例如,用户搜索筛选器可能是 (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io))。注意:如果搜索筛选器未使用[有效的 AD 搜索语法](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax),则用户列表将为空。 |
+| User Enabled Attribute | 该属性是一个整数值,代表用户账号标志的枚举。Rancher 使用此选项来确定用户账号是否已禁用。通常应该将此参数设置为 AD 标准的 `userAccountControl`。 |
+| Disabled Status Bitmask | 指定的禁用用户账号的 `User Enabled Attribute` 的值。通常,你应该将此参数设置为 Microsoft Active Directory Schema 中指定的默认值 2(请参见[此处](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks))。 |
+
+---
+
+#### 组 Schema
+
+下表详细说明了组 Schema 配置的参数。
+
+**表 3:组 Schema 配置参数**
+
+| 参数 | 描述 |
+|:--|:--|
+| Object Class | 域中用于组对象的对象类别名称。如果定义了此参数,则仅指定对象类别的名称 - *请勿*将其放在 LDAP 包装器中,例如 `&(objectClass=xxxx)`。 |
+| Name Attribute | 名称属性的值适合作为显示名称。 |
+| Group Member User Attribute | **用户属性**的名称。它的格式与 `Group Member Mapping Attribute` 中的组成员匹配。 |
+| Group Member Mapping Attribute | 包含组成员的组属性的名称。 |
+| Search Attribute | 在将组添加到集群或项目时,用于构造搜索筛选器的属性。请参见用户 Schema 的 `Search Attribute`。 |
+| Search Filter | 当 Rancher 尝试将组添加到网站访问列表,或将组添加到集群或项目时,此筛选器将应用于搜索的组列表。例如,组搜索筛选器可以是 (|(cn=group1)(cn=group2))。注意:如果搜索筛选器未使用[有效的 AD 搜索语法](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax),则组列表将为空。 |
+| Group DN Attribute | 组属性的名称,其格式与描述用户成员身份的用户属性中的值匹配。参见 `User Member Attribute`。 |
+| Nested Group Membership | 此设置定义 Rancher 是否应解析嵌套组成员身份。仅当你的组织使用这些嵌套成员身份时才使用(即你有包含其他组作为成员的组。我们建议尽量避免使用嵌套组,从而避免在存在大量嵌套成员时出现潜在的性能问题)。 |
+
+---
+
+### 测试身份验证
+
+完成配置后,请**使用你的 AD 管理员账户**测试与 AD 服务器的连接。如果测试成功,将启用配置的 Active Directory 身份验证,测试时使用的账号会成为管理员。
+
+:::note
+
+与此步骤中输入的凭证相关的 AD 用户将映射到本地主体账号,并在 Rancher 中分配系统管理员权限。因此,你应该决定使用哪个 AD 账号来执行此步骤。
+
+:::
+
+1. 输入应映射到本地主体账号的 AD 账号的**用户名**和**密码** 。
+2. 点击**启用 Active Directory 认证**来完成设置。
+
+**结果**:
+
+- 已启用 Active Directory 身份验证。
+- 你已使用 AD 凭证以系统管理员身份登录到 Rancher。
+
+:::note
+
+如果 LDAP 服务中断,你仍然可以使用本地配置的 `admin` 账号和密码登录。
+
+:::
+
+## 附录:使用 ldapsearch 确定搜索库和 Schema
+
+为了成功配置 AD 身份验证,你必须提供 AD 服务器的层次结构和 Schema 的正确配置。
+
+[`ldapsearch`](https://manpages.ubuntu.com/manpages/kinetic/en/man1/ldapsearch.1.html) 工具允许你查询你的 AD 服务器,从而了解用于用户和组对象的 Schema。
+
+在下面的示例命令中,我们假设:
+
+- Active Directory 服务器的主机名是 `ad.acme.com`。
+- 服务器正在监听端口 `389` 上的未加密连接。
+- Active Directory 的域是 `acme`。
+- 你有一个用户名为 `jdoe`,密码为 `secret` 的有效 AD 账号。
+
+### 确认搜索库
+
+首先,我们将使用 `ldapsearch` 来找到用户和组的父节点的标识名称:
+
+```
+$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \
+-h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe"
+```
+
+此命令执行 LDAP 搜索,搜索起点设置为域根目录(`-b "dc=acme,dc=com"`),并执行针对用户账号(`sAMAccountName=jdoe`)的筛选器,返回所述用户的属性:
+
+
+
+因为在这种情况下,用户的 DN 是 `CN=John Doe,CN=Users,DC=acme,DC=com` [5],所以我们应该使用父节点 DN `CN=Users,DC=acme,DC=com` 来配置**用户搜索库**。
+
+同样,基于 **memberOf** 属性 [4] 中引用的组的 DN,**组搜索库**的值将是该值的父节点,即 `OU=Groups,DC=acme,DC=com`。
+
+### 确定用户 Schema
+
+上述 `ldapsearch` 查询的输出还能用于确定在用户 Schema 配置中使用的值:
+
+- `Object Class`:**person** [1]
+- `Username Attribute`:**name** [2]
+- `Login Attribute`:**sAMAccountName** [3]
+- `User Member Attribute`:**memberOf** [4]
+
+:::note
+
+如果我们组织中的 AD 用户使用其 UPN(例如 `jdoe@acme.com`)而不是短登录名进行身份验证,则必须将 `Login Attribute` 设置为 **userPrincipalName**。
+
+:::
+
+我们还将 `Search Attribute` 参数设置为 **sAMAccountName|name**。这样,用户可以通过输入用户名或全名添加到 Rancher UI 中的集群/项目中。
+
+### 确定组 Schema
+
+接下来,我们将查询与此用户关联的一个组,在本例中为 `CN=examplegroup,OU=Groups,DC=acme,DC=com`:
+
+```
+$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \
+-h ad.acme.com -b "ou=groups,dc=acme,dc=com" \
+-s sub "CN=examplegroup"
+```
+
+此命令将告知我们用于组对象的属性:
+
+
+
+同样,这能让我们确定要在组 Schema 配置中输入的值:
+
+- `Object Class`:**group** [1]
+- `Name Attribute`:**name** [2]
+- `Group Member Mapping Attribute`:**member** [3]
+- `Search Attribute`:**sAMAccountName** [4]
+
+查看 **member** 属性的值,我们可以看到它包含被引用用户的 DN。这对应我们的用户对象中的 **distinguishedName** 属性。因此,必须将 `Group Member User Attribute` 参数的值设置为此属性。
+
+同样,我们可以看到用户对象中 **memberOf** 属性中的值对应组的 **distinguishedName** [5]。因此,我们需要将 `Group DN Attribute` 参数的值设置为此属性。
+
+## 附录:故障排除
+
+如果在测试与 Active Directory 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
new file mode 100644
index 00000000000..77d51e007bf
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-azure-ad.md
@@ -0,0 +1,326 @@
+---
+title: 配置 Azure AD
+---
+
+## Microsoft Graph API
+
+Microsoft Graph API 现在是设置 Azure AD 的流程。下文将帮助[新用户](#新用户设置)使用新实例来配置 Azure AD,并帮助现有 Azure 应用所有者[迁移到新流程](#从-azure-ad-graph-api-迁移到-microsoft-graph-api)。
+
+Rancher 中的 Microsoft Graph API 流程正在不断发展。建议你使用最新的 2.7 补丁版本,该版本仍在积极开发中,并将持续获得新功能和改进。
+
+### 新用户设置
+
+如果你在 Azure 中托管了一个 Active Directory(AD)实例,你可以将 Rancher 配置为允许你的用户使用 AD 账号登录。你需要在 Azure 和 Rancher 中进行 Azure AD 外部身份验证。
+
+:::note 注意事项
+
+- Azure AD 集成仅支持服务提供商发起的登录。
+- 大部分操作是从 [Microsoft Azure 门户](https://portal.azure.com/)执行的。
+
+:::
+
+#### Azure Active Directory 配置概述
+
+要将 Rancher 配置为允许用户使用其 Azure AD 账号进行身份验证,你需要执行多个步骤。在你开始之前,请查看下文操作步骤大纲。
+
+:::tip
+
+在开始之前,打开两个浏览器选项卡:一个用于 Rancher,另一个用于 Azure 门户。这样,你可以将门户的配置值复制并粘贴到 Rancher 中。
+
+:::
+
+
+#### 1. 在 Azure 注册 Rancher
+
+在 Rancher 中启用 Azure AD 之前,你必须先向 Azure 注册 Rancher。
+
+1. 以管理用户身份登录 [Microsoft Azure](https://portal.azure.com/)。后续配置步骤中需要管理访问权限。
+
+1. 使用搜索功能打开 **App registrations** 服务。
+
+1. 点击 **New registration** 并填写表单。
+
+ 
+
+ 1. 输入 **Name**(例如 `Rancher`)。
+
+
+ 1. 在 **Supported account types** 中,选择 **Accounts in this organizational directory only (AzureADTest only - Single tenant)**。这对应于旧版应用注册选项。
+
+ :::note
+
+ 在更新后的 Azure 门户中,Redirect URI 与 Reply URL 的意思相同。为了将 Azure AD 与 Rancher 一起使用,你必须将 Rancher 列入 Azure 白名单(之前通过 Reply URL 完成)。因此,请确保使用你的 Rancher Server URL 填写 Redirect URI,以包含下面列出的验证路径。
+
+ :::
+
+ 1. 在 [**Redirect URI**](https://docs.microsoft.com/en-us/azure/active-directory/develop/reply-url) 中,确保从下拉列表中选择 **Web**,并在旁边的文本框中输入 Rancher Server 的 URL。Rancher Server URL 后需要追加验证路径,例如 `/verify-auth-azure`。
+
+ :::tip
+
+ 你可以在 Azure AD 身份验证页面(全局视图 > Authentication > Web)中找到你 Rancher 中的个性化 Azure Redirect URI(Reply URL)。
+
+ :::
+
+ 1. 单击 **Register**。
+
+:::note
+
+此更改可能需要五分钟才能生效。因此,如果在配置 Azure AD 之后无法立即进行身份验证,请不要惊慌。
+
+:::
+
+#### 2. 创建客户端密文
+
+在 Azure 门户中,创建一个客户端密文。Rancher 将使用此密钥向 Azure AD 进行身份验证。
+
+1. 使用搜索功能打开 **App registrations** 服务。然后打开你在上一个步骤中创建的 Rancher 项。
+
+ 
+
+1. 在导航窗格中,单击 **Certificates & secrets**。
+
+1. 单击 **New client secret**。
+ 
+1. 输入 **Description**(例如 `Rancher`)。
+1. 从 **Expires** 下的选项中选择持续时间。此下拉菜单设置的是密钥的到期日期。日期越短则越安全,但需要你更频繁地创建新密钥。
+ 请注意,如果检测到应用程序 Secret 已过期,用户将无法登录 Rancher。为避免此问题,请在 Azure 中轮换 Secret 并在过期前在 Rancher 中更新它。
+1. 单击 **Add**(无需输入值,保存后会自动填充)。
+
+
+1. 稍后你将在 Rancher UI 中输入此密钥作为你的 **Application Secret**。由于你将无法在 Azure UI 中再次访问键值,因此请在其余设置过程中保持打开此窗口。
+
+#### 3. 设置 Rancher 所需的权限
+
+接下来,在 Azure 中为 Rancher 设置 API 权限。
+
+:::caution
+
+确保你设置了 Application 权限,而*不是* Delegated 权限。否则,你将无法登录 Azure AD。
+
+:::
+
+1. 在导航窗格中,选择 **API permissions**。
+
+1. 单击 **Add a permission**。
+
+1. 从 Microsoft Graph API 中,选择以下 **Application Permissions**: `Directory.Read.All`。
+
+ 
+
+1. 返回导航栏中的 **API permissions**。在那里,单击 **Grant admin consent**。然后单击 **Yes**。该应用程序的权限应如下所示:
+
+
+
+:::note
+
+Rancher 不会验证你授予 Azure 应用程序的权限。你可以自由使用任何你所需的权限,只要这些权限允许 Rancher 使用 AD 用户和组。
+
+具体来说,Rancher 需要允许以下操作的权限:
+- 获取一个用户。
+- 列出所有用户。
+- 列出给定用户所属的组。
+- 获取一个组。
+- 列出所有组。
+
+Rancher 执行这些操作来登录用户或搜索用户/组。请记住,权限必须是 `Application` 类型。
+
+下面是几个满足 Rancher 需求的权限组合示例:
+- `Directory.Read.All`
+- `User.Read.All` 和 `GroupMember.Read.All`
+- `User.Read.All` 和 `Group.Read.All`
+
+:::
+
+#### 4. 复制 Azure 应用数据
+
+
+
+1. 获取你的 Rancher **租户 ID**。
+
+ 1. 使用搜索打开 **App registrations**。
+
+ 1. 找到你为 Rancher 创建的项。
+
+ 1. 复制 **Directory ID** 并将其作为 **Tenant ID** 粘贴到 Rancher 中。
+
+1. 获取你的 Rancher **Application (Client) ID**。
+
+ 1. 如果你还未在该位置,请使用搜索打开 **App registrations**。
+
+ 1. 在 **Overview**中,找到你为 Rancher 创建的条目。
+
+ 1. 复制 **Application (Client) ID** 并将其作为 **Application ID** 粘贴到 Rancher 中。
+
+1. 你的端点选项通常是 [Standard](#global) 或 [China](#中国)。对于这两个选项,你只需要输入 **Tenant ID**、**Application ID** 和 **Application Secret**。
+
+
+
+**对于自定义端点**:
+
+:::caution
+
+Rancher 未测试也未完全支持自定义端点。
+
+:::
+
+你还需要手动输入 Graph、Token 和 Auth Endpoints。
+
+- 从 App registrations 中,点击 Endpoints :
+
+
+
+- 以下端点将是你的 Rancher 端点值。请使用这些端点的 v1 版本。
+ - **Microsoft Graph API endpoint**(Graph 端点)
+ - **OAuth 2.0 token endpoint (v1)**(Token 端点)
+ - **OAuth 2.0 authorization endpoint (v1)** (Auth 端点)
+
+#### 5. 在 Rancher 中配置 Azure AD
+
+要完成配置,请在 Rancher UI 中输入你的 AD 实例信息。
+
+1. 登录到 Rancher。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+
+1. 在左侧导航栏,单击**认证**。
+
+1. 单击 **AzureAD**。
+
+1. 使用你在[复制 Azure 应用数据](#4-复制-azure-应用数据)时复制的信息,填写**配置 Azure AD 账号**的表单。
+
+ :::caution
+
+ Azure AD 帐户将被授予管理员权限,因为其详细信息将映射到 Rancher 本地主体帐户。在继续之前确保此权限级别是适当的。
+
+ :::
+
+ **对于标准或中国端点:**
+
+ 下表介绍了你在 Azure 门户中复制的值与 Rancher 中字段的映射:
+
+ | Rancher 字段 | Azure 值 |
+ | ------------------ | ------------------------------------- |
+ | 租户 ID | Directory ID |
+ | Application ID | Application ID |
+ | 应用密文 | Key Value |
+ | 端点 | https://login.microsoftonline.com/ |
+
+ **对于自定义端点**:
+
+ 下表将你的自定义配置值映射到 Rancher 字段:
+
+ | Rancher 字段 | Azure 值 |
+ | ------------------ | ------------------------------------- |
+ | Graph 端点 | Microsoft Graph API Endpoint |
+ | Token 端点 | OAuth 2.0 Token Endpoint |
+ | Auth 端点 | OAuth 2.0 Authorization Endpoint |
+
+ **重要提示**:在自定义配置中输入 Graph Endpoint 时,请从 URL 中删除 Tenant ID:
+
+   https://graph.microsoft.com/abb5adde-bee8-4821-8b03-e63efdc7701c
+
+1. 点击**启用**。
+
+**结果**:Azure Active Directory 身份验证已配置。
+
+
+### 从 Azure AD Graph API 迁移到 Microsoft Graph API
+
+由于 [Azure AD Graph API](https://docs.microsoft.com/en-us/graph/migrate-azure-ad-graph-overview) 已弃用并计划于 2023 年 6 月停用,管理员应更新他们的 Azure AD 应用程序以在 Rancher 中使用 [Microsoft Graph API](https://docs.microsoft.com/en-us/graph/use-the-api)。
+你需要在端点弃用之前完成操作。
+如果在停用后 Rancher 仍配置为使用 Azure AD Graph API,用户可能无法使用 Azure AD 登录 Rancher。
+
+#### 在 Rancher UI 中更新端点
+
+:::caution
+
+管理员需要在迁移下述端点之前创建一个 [Rancher 备份](../../../new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md)。
+
+:::
+
+1. [更新](#3-设置-rancher-所需的权限) Azure AD 应用程序注册的权限。这个步骤非常关键。
+
+1. 登录到 Rancher。
+
+1. 在 Rancher UI 主页中,记下屏幕顶部的横幅,该横幅建议用户更新 Azure AD 身份验证。单击提供的链接以执行此操作。
+
+ 
+
+1. 要完成新的 Microsoft Graph API 迁移,请单击 **Update Endpoint**。
+
+ **注意**:在开始更新之前,请确保你的 Azure 应用程序具有[新的权限集](#3-设置-rancher-所需的权限)。
+
+ 
+
+1. 在收到弹出警告消息时,单击 **Update**:
+
+ 
+
+1. 有关 Rancher 执行的完整端点更改,请参阅下面的[表格](#global)。管理员不需要手动执行此操作。
+
+#### 离线环境
+
+在离线环境中,由于 Graph Endpoint URL 正在更改,因此管理员需要确保其端点被[列入白名单](#3.2)。
+
+#### 回滚迁移
+
+如果你需要回滚迁移,请注意以下事项:
+
+1. 如果管理员想要回滚,我们建议他们使用正确的恢复流程。有关参考信息,请参阅[备份文档](../../../new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md)、[恢复文档](../../../new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md)和[示例](../../../../reference-guides/backup-restore-configuration/examples.md)。
+
+1. 如果 Azure 应用程序所有者想要轮换应用程序密钥,他们也需要在 Rancher 中进行轮换(因为在 Azure 中更改应用程序密钥时,Rancher 不会自动更新应用程序密钥)。在 Rancher 中,它存储在名为 `azureadconfig-applicationsecret` 的 Kubernetes 密文中,该密文位于 `cattle-global-data` 命名空间中。
+
+:::caution
+
+如果你使用现有的 Azure AD 设置升级到 Rancher v2.7.0+,并选择了禁用认证提供程序,你将无法恢复以前的设置。你也无法使用旧流程设置 Azure AD。你需要使用新的认证流程重新注册。由于 Rancher 现在使用 Graph API,因此用户需要[在 Azure 门户中设置适当的权限](#3-设置-rancher-所需的权限)。
+
+:::
+
+
+#### Global:
+
+| Rancher 字段 | 已弃用的端点 |
+---------------- | -------------------------------------------------------------
+| Auth 端点 | https://login.microsoftonline.com/{tenantID}/oauth2/authorize |
+| 端点 | https://login.microsoftonline.com/ |
+| Graph 端点 | https://graph.windows.net/ |
+| Token 端点 | https://login.microsoftonline.com/{tenantID}/oauth2/token |
+
+| Rancher 字段 | 新端点 |
+---------------- | ------------------------------------------------------------------
+| Auth 端点 | https://login.microsoftonline.com/{tenantID}/oauth2/v2.0/authorize |
+| 端点 | https://login.microsoftonline.com/ |
+| Graph 端点 | https://graph.microsoft.com |
+| Token 端点 | https://login.microsoftonline.com/{tenantID}/oauth2/v2.0/token |
+
+#### 中国:
+
+| Rancher 字段 | 已弃用的端点 |
+---------------- | ----------------------------------------------------------
+| Auth 端点 | https://login.chinacloudapi.cn/{tenantID}/oauth2/authorize |
+| 端点 | https://login.chinacloudapi.cn/ |
+| Graph 端点 | https://graph.chinacloudapi.cn/ |
+| Token 端点 | https://login.chinacloudapi.cn/{tenantID}/oauth2/token |
+
+| Rancher 字段 | 新端点 |
+---------------- | -------------------------------------------------------------------------
+| Auth 端点 | https://login.partner.microsoftonline.cn/{tenantID}/oauth2/v2.0/authorize |
+| 端点 | https://login.partner.microsoftonline.cn/ |
+| Graph 端点 | https://microsoftgraph.chinacloudapi.cn |
+| Token 端点 | https://login.partner.microsoftonline.cn/{tenantID}/oauth2/v2.0/token |
+
+
+## 已弃用的 Azure AD Graph API
+
+> **重要提示**:
+>
+> - [Azure AD Graph API](https://docs.microsoft.com/en-us/graph/migrate-azure-ad-graph-overview) 已被弃用,Microsoft 将在 2023 年 6 月 30 日后随时停用它且不会另行通知。我们将更新我们的文档,以便在停用时向社区提供建议。Rancher 现在使用 [Microsoft Graph API](https://docs.microsoft.com/en-us/graph/use-the-api) 来将 Azure AD 设置为外部身份验证提供程序。
+>
+>
+> - 如果你是新用户或希望进行迁移,请参阅新的流程说明: Rancher v2.7.0+ 。
+>
+>
+> - 如果你不想在 Azure AD Graph API 停用后升级到 v2.7.0+,你需要:
+> - 使用内置的 Rancher 身份认证,或者
+> - 使用另一个第三方身份认证系统并在 Rancher 中进行设置。请参阅[身份验证文档](../../../../pages-for-subheaders/authentication-config.md),了解如何配置其他开放式身份验证提供程序。
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
new file mode 100644
index 00000000000..6f3100e462c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-freeipa.md
@@ -0,0 +1,60 @@
+---
+title: 配置 FreeIPA
+---
+
+如果你的组织使用 FreeIPA 进行用户身份验证,你可以通过配置 Rancher 来允许你的用户使用 FreeIPA 凭证登录。
+
+:::note 先决条件:
+
+- 你必须配置了 [FreeIPA 服务器](https://www.freeipa.org/)。
+- 在 FreeIPA 中创建一个具有 `read-only` 访问权限的 ServiceAccount。当用户使用 API 密钥发出请求时,Rancher 使用此账号来验证组成员身份。
+- 参见[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+
+:::
+
+1. 使用分配了 `administrator` 角色(即 _本地主体_)的本地用户登录到 Rancher。
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **FreeIPA**。
+1. 填写**配置 FreeIPA 服务器**表单,
+
+ 你可能需要登录到域控制器,来查找表单中请求的信息。
+
+ :::note 使用 TLS?
+
+ 如果证书是自签名,或者不是来自公认的证书颁发机构的,请确保提供完整的证书链。Rancher 需要该链来验证服务器的证书。
+
+ :::
+
+ :::note 用户搜索库 vs. 组搜索库
+
+ 搜索库使 Rancher 可以搜索 FreeIPA 中的用户和组。这些字段仅用于搜索库,不适用于搜索筛选器。
+
+ * 如果你的用户和组位于同一搜索库中,则仅填写用户搜索库。
+ * 如果你的组位于其他搜索库中,则可以选择填写组搜索库。该字段专用于搜索组,但不是必需的。
+
+ :::
+
+1. 如果你的 FreeIPA 不同于标准的 AD Schema,则必须完成**自定义 Schema** 部分实现匹配。否则,跳过此步骤。
+
+ :::note 搜索属性
+
+ `搜索属性`字段的默认值为三个特定值:`uid|sn|givenName`。配置 FreeIPA 后,当用户输入文本以添加用户或组时,Rancher 会自动查询 FreeIPA 服务器,并尝试按用户 ID,姓氏或名字来匹配字段。Rancher 专门搜索以在搜索字段中输入的文本开头的用户/组。
+
+ 默认字段值为 `uid|sn|givenName`,但是你可以将此字段配置为这些字段的子集。管道符 (`|`) 用于分隔各个字段。
+
+ * `uid`:用户 ID
+ * `sn`:姓
+ * `givenName`:名
+
+ Rancher 使用此搜索属性为用户和组创建搜索筛选器,但是你*不能*在此字段中添加自己的搜索筛选器。
+
+ :::
+
+1. 在 **Authenticate with FreeIPA** 中输入你的 FreeIPA 用户名和密码,确认已为 Rancher 配置 FreeIPA 身份验证。
+1. 点击**启用**。
+
+**结果**:
+
+- FreeIPA 验证配置成功。
+- 你将使用你的 FreeIPA 账号(即 _外部主体_)登录到 Rancher。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
new file mode 100644
index 00000000000..e903421611d
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-github.md
@@ -0,0 +1,56 @@
+---
+title: 配置 GitHub
+---
+
+在使用 GitHub 的环境中,你可以通过配置 Rancher 允许用户使用 GitHub 凭证登录。
+
+:::note 先决条件:
+
+参见[外部身份验证配置和主体用户](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+
+:::
+
+1. 使用分配了 `administrator` 角色(即 _本地主体_)的本地用户登录到 Rancher。
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **GitHub**。
+1. 按照显示的说明设置 GitHub 应用。Rancher 会将你重定向到 GitHub 完成注册。
+
+ :::note 什么是授权回调 URL?
+
+ 授权回调 URL 是用户开始使用你的应用(即初始屏幕)的 URL。
+
+ 使用外部身份验证时,实际上不会在你的应用中进行身份验证。相反,身份验证在外部进行(在本例中为 GitHub)。在外部身份验证成功完成后,用户将通过授权回调 URL 重新进入应用。
+
+ :::
+
+1. 从 GitHub 复制 **Client ID** 和 **Client Secret**。将它们粘贴到 Rancher 中。
+
+ :::note 在哪里可以找到 Client ID 和 Client Secret?
+
+ 在 GitHub 中,选择 **Settings > Developer Settings > OAuth Apps**。你可以在此处找到 Client ID 和 Client Secret。
+
+ :::
+
+1. 单击**使用 GitHub 认证**。
+
+1. 使用 **Site Access** 选项来配置用户授权的范围。
+
+ - **允许任何有效用户**
+
+ _任何_ GitHub 用户都能访问 Rancher。通常不建议使用此设置。
+
+ - **允许集群和项目成员,以及授权的用户和组织**
+
+ 添加为**集群成员**或**项目成员**的任何 GitHub 用户或组都可以登录 Rancher。此外,任何添加到**授权用户和组织**列表中的 GitHub 用户和组都能登录到 Rancher。
+
+ - **仅允许授权用户和组织**
+
+ 只有添加到**授权用户和组织**的 GitHub 用户和组能登录 Rancher。
+
+1. 点击**启用**。
+
+**结果**:
+
+- GitHub 验证配置成功。
+- 你将使用你的 GitHub 账号(即 _外部主体_)登录到 Rancher。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-google-oauth.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-google-oauth.md
new file mode 100644
index 00000000000..ca4d522ba35
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-google-oauth.md
@@ -0,0 +1,111 @@
+---
+title: 配置 Google OAuth
+---
+
+如果你的组织使用 G Suite 进行用户身份验证,你可以通过配置 Rancher 来允许你的用户使用 G Suite 凭证登录。
+
+只有 G Suite 域的管理员才能访问 Admin SDK。因此,只有 G Suite 管理员可以配置 Rancher 的 Google OAuth。
+
+在 Rancher 中,只有具有 **Manage Authentication** [全局角色](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md)的管理员或用户才能配置身份验证。
+
+## 先决条件
+
+- 你必须配置了 [G Suite 管理员账号](https://admin.google.com)。
+- G Suite 需要一个[顶级私有域 FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) 作为授权域。获取 FQDN 的一种方法是在 Route53 中为 Rancher Server 创建 A 记录。你不需要使用该记录更新 Rancher Server 的 URL 设置,因为可能有集群使用该 URL。
+- 你的 G Suite 域必须启用了 Admin SDK API。你可以按照[此页面](https://support.google.com/a/answer/60757?hl=en)中的步骤启用它。
+
+启用 Admin SDK API 后,你的 G Suite 域的 API 页面应如下图所示:
+
+
+## 在 Rancher 中为 OAuth 设置 G Suite
+
+在 Rancher 中设置 Google OAuth 之前,你需要登录到你的 G Suite 账号并完成以下设置:
+
+1. [在 G Suite 中将 Rancher 添加为授权域](#1-将-rancher-添加为授权域)
+1. [为 Rancher Server 生成 OAuth2 凭证](#2-为-rancher-server-生成-oauth2-凭证)
+1. [为 Rancher Server 创建 ServiceAccount 凭证](#3-创建-serviceaccount-凭证)
+1. [将 ServiceAccount 密钥注册成 OAuth Client](#4-将-serviceaccount-密钥注册成-oauth-client)
+
+### 1. 将 Rancher 添加为授权域
+
+1. 点击[此处](https://console.developers.google.com/apis/credentials)前往你的 Google 域的凭证页面。
+1. 选择你的项目,然后点击 **OAuth consent screen**。
+ 
+1. 前往 **Authorized Domains**,并在列表中输入你的 Rancher Server URL 的顶级私有域。顶级私有域是最右边的超级域。例如,`foo.co.uk` 是 `www.foo.co.uk` 的顶级私有域。有关顶级私有域的更多信息,请参见[此处](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains)。
+1. 前往 **Scopes for Google APIs**,并确保已启用 **email**,**profile** 和 **openid**。
+
+**结果**:Rancher 已被添加为 Admin SDK API 的授权域。
+
+### 2. 为 Rancher Server 生成 OAuth2 凭证
+
+1. 前往 Google API 控制台,选择你的项目并前往 [credentials ](https://console.developers.google.com/apis/credentials)页面。
+ 
+1. 在 **Create Credentials** 下拉框中,选择 **OAuth client ID**。
+1. 点击 **Web application**。
+1. 输入一个名称。
+1. 填写 **Authorized JavaScript origins** 和 **Authorized redirect URIs**。请注意,设置 Google OAuth 的 Rancher UI 页面(**Security > Authentication > Google** 下的全局视图)为你提供了这一步要输入的准确链接。
+- 在 **Authorized JavaScript origins** 处,输入你的 Rancher Server URL。
+- 在 **Authorized redirect URIs** 处,输入你的 Rancher Server 的 URL 并附加路径 `verify-auth`。例如,如果你的 URI 是 `https://rancherServer`,你需要输入 `https://rancherServer/verify-auth`。
+1. 点击 **Create**。
+1. 创建凭证之后,你将看到一个页面,其中显示你的凭证列表。选择刚刚创建的凭证,然后在最右边的行中单击 **Download JSON**。保存该文件,以便向 Rancher 提供这些凭证。
+
+**结果**:你已成功创建 OAuth 凭证。
+
+### 3. 创建 ServiceAccount 凭证
+由于 Google Admin SDK 只对管理员可用,普通用户不能使用它来检索其他用户或其组的配置文件。普通用户甚至不能检索他们自己的组。
+
+由于 Rancher 提供基于组的成员访问,我们要求用户能够获得自己的组,并在需要时查找其他用户和组。
+
+为了解决这个问题,G Suite 建议创建一个 ServiceAccount,并将你的 G Suite 域的权限委托给该 ServiceAccount。
+
+本节介绍如何:
+
+- 创建一个 ServiceAccount
+- 为 ServiceAccount 创建一个密钥并下载 JSON 格式的凭证
+
+1. 点击[此处](https://console.developers.google.com/iam-admin/serviceaccounts)并选择要生成 OAuth 凭证的项目。
+1. 点击 **Create Service Account**。
+1. 输入名称,并点击 **Create**。
+ 
+1. 不要在 **Service account permissions** 页面设置任何角色,然后单击 **Continue**。
+ 
+1. 点击 **Create Key** 并选择 JSON 选项。下载并保存 JSON 文件,以便你可以将其作为 ServiceAccount 凭证提供给 Rancher。
+ 
+
+**结果**:你的 ServiceAccount 已创建成功。
+
+### 4. 将 ServiceAccount 密钥注册成 OAuth Client
+
+你需要为在上一步中创建的 ServiceAccount 授予一些权限。Rancher 仅要求为用户和组授予只读权限。
+
+使用 ServiceAccount 密钥的唯一 ID,按照以下步骤将其注册为 OAuth Client:
+
+1. 获取你刚刚创建的密钥的唯一 ID。如果密钥列表中没有在你创建的密钥旁边显示唯一 ID,你需要先启用 **Unique ID** 列:点击 **Unique ID**,然后点击 **OK**。这将向 ServiceAccount 密钥列表中添加 **Unique ID** 列。保存你创建的 ServiceAccount 对应的唯一 ID。注意:这是一个数字 Key,不要与字母数字字段 **Key ID** 混淆。
+
+ 
+1. 前往 [**Domain-wide Delegation** 页面。](https://admin.google.com/ac/owl/domainwidedelegation)
+1. 在 **Client Name** 字段中添加上一步中获得的唯一 ID。
+1. 在 **One or More API Scopes** 字段中,添加以下作用域:
+ ```
+ openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly
+ ```
+1. 点击 **Authorize**。
+
+**结果**:ServiceAccount 在你的 G Suite 账号中已注册为 OAuth 客户端。
+
+## 在 Rancher 中配置 Google OAuth
+
+1. 使用分配了 [administrator](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) 角色的本地用户登录到 Rancher。这个用户也称为本地主体。
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **Google**。UI 中的说明介绍了使用 Google OAuth 设置身份验证的步骤。
+ 1. 管理员邮箱:提供 GSuite 设置中的管理员账户的电子邮箱。为了查找用户和组,Google API 需要管理员的电子邮件和 ServiceAccount 密钥。
+ 1. 域名:提供配置了 G Suite 的域。请提供准确的域,而不是别名。
+ 1. 属于多个用户组的用户:选中此框以启用嵌套组成员关系。Rancher 管理员可以在配置认证后的任何时候禁用它。
+ - **步骤一**是将 Rancher 添加为授权域(详情请参见[本节](#1-将-rancher-添加为授权域))。
+ - **步骤二**提供你完成[本节](#2-为-rancher-server-生成-oauth2-凭证)后下载的 OAuth 凭证 JSON。你可以上传文件或将内容粘贴到 **OAuth Credentials** 字段。
+ - **步骤三**提供在[本节](#3-创建-serviceaccount-凭证)末尾下载的 ServiceAccount 凭证 JSON。仅当你成功[在 G Suite 账号中将 ServiceAccount 密钥注册为 OAuth Client](#4-将-serviceaccount-密钥注册成-oauth-client) 后,凭证才能正常工作。
+1. 点击**使用 Google 认证**。
+1. 点击**启用**。
+
+**结果**:Google 验证配置成功。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-oidc.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-oidc.md
new file mode 100644
index 00000000000..86dff79d0af
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-oidc.md
@@ -0,0 +1,145 @@
+---
+title: 配置 Keycloak (OIDC)
+description: 创建 Keycloak OpenID Connect (OIDC) 客户端并配置 Rancher 以使用 Keycloak。你的用户将能够使用他们的 Keycloak 登录名登录 Rancher。
+---
+
+如果你的组织使用 [Keycloak Identity Provider (IdP)](https://www.keycloak.org) 进行用户身份验证,你可以通过配置 Rancher 来允许用户使用 IdP 凭证登录。Rancher 支持使用 OpenID Connect (OIDC) 协议和 SAML 协议来集成 Keycloak。与 Rancher 一起使用时,这两种实现在功能上是等效的。本文描述了配置 Rancher 以通过 OIDC 协议与 Keycloak 一起使用的流程。
+
+如果你更喜欢将 Keycloak 与 SAML 协议一起使用,请参见[此页面](configure-keycloak-saml.md)。
+
+如果你有使用 SAML 协议的现有配置并希望切换到 OIDC 协议,请参见[本节](#从-saml-迁移到-oidc)。
+
+## 先决条件
+
+- 已在 Rancher 上禁用 Keycloak (SAML)。
+- 你必须配置了 [Keycloak IdP 服务器](https://www.keycloak.org/guides#getting-started)。
+- 在 Keycloak 中,使用以下设置创建一个[新的 OIDC 客户端](https://www.keycloak.org/docs/latest/server_admin/#oidc-clients)。如需获取帮助,请参见 [Keycloak 文档](https://www.keycloak.org/docs/latest/server_admin/#oidc-clients)。
+
+ | 设置 | 值 |
+ ------------|------------
+ | `Client ID` | <CLIENT_ID> (例如 `rancher`) |
+ | `Name` | <CLIENT_NAME> (例如 `rancher`) |
+ | `Client Protocol` | `openid-connect` |
+ | `Access Type` | `confidential` |
+ | `Valid Redirect URI` | `https://yourRancherHostURL/verify-auth` |
+
+- 在新的 OIDC 客户端中,创建 [Mappers](https://www.keycloak.org/docs/latest/server_admin/#_protocol-mappers) 来公开用户字段。
+ - 使用以下设置创建一个新的 "Groups Mapper":
+
+ | 设置 | 值 |
+ ------------|------------
+ | `Name` | `Groups Mapper` |
+ | `Mapper Type` | `Group Membership` |
+ | `Token Claim Name` | `groups` |
+ | `Add to ID token` | `OFF` |
+ | `Add to access token` | `OFF` |
+ | `Add to user info` | `ON` |
+
+ - 使用以下设置创建一个新的 "Client Audience":
+
+ | 设置 | 值 |
+ ------------|------------
+ | `Name` | `Client Audience` |
+ | `Mapper Type` | `Audience` |
+ | `Included Client Audience` | <CLIENT_NAME> |
+ | `Add to access token` | `ON` |
+
+ - 使用以下设置创建一个新的 "Groups Path":
+
+ | 设置 | 值 |
+ ------------|------------
+ | `Name` | `Group Path` |
+ | `Mapper Type` | `Group Membership` |
+ | `Token Claim Name` | `full_group_path` |
+ | `Full group path` | `ON` |
+ | `Add to user info` | `ON` |
+
+## 在 Rancher 中配置 Keycloak
+
+1. 在 Rancher UI 中,单击 **☰ > 用户 & 认证**。
+1. 单击左侧导航栏的**认证**。
+1. 选择 **Keycloak (OIDC)**。
+1. 填写**配置 Keycloak OIDC 账号**表单。有关填写表单的帮助,请参见[配置参考](#配置参考)。
+1. 完成**配置 Keycloak OIDC 账号**表单后,单击**启用**。
+
+ Rancher 会将你重定向到 IdP 登录页面。输入使用 Keycloak IdP 进行身份验证的凭证,来验证你的 Rancher Keycloak 配置。
+
+ :::note
+
+ 你可能需要禁用弹出窗口阻止程序才能看到 IdP 登录页面。
+
+ :::
+
+**结果**:已将 Rancher 配置为使用 OIDC 协议与 Keycloak 一起工作。你的用户现在可以使用 Keycloak 登录名登录 Rancher。
+
+## 配置参考
+
+| 字段 | 描述 |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 客户端 ID | 你的 Keycloak 客户端的 `Client ID`。 |
+| 客户端密码 | 你的 Keycloak 客户端生成的 `Secret`。在 Keycloak 控制台中,单击 **Clients**,选择你创建的客户端,选择 **Credentials** 选项卡,然后复制 `Secret` 字段的值。 |
+| 私钥/证书 | 在 Rancher 和你的 IdP 之间创建安全外壳(SSH)的密钥/证书对。如果你的 Keycloak 服务器上启用了 HTTPS/SSL,则为必填。 |
+| 端点 | 选择为 `Rancher URL`、`发行者`和 `Auth 端点`字段使用生成的值,还是在不正确时进行手动覆盖。 |
+| Keycloak URL | 你的 Keycloak 服务器的 URL。 |
+| Keycloak Realm | 创建 Keycloak 客户端的 Realm 的名称。 |
+| Rancher URL | Rancher Server 的 URL。 |
+| Issuer | 你的 IdP 的 URL。 |
+| Auth 端点 | 重定向用户进行身份验证的 URL。 |
+
+## 从 SAML 迁移到 OIDC
+
+本节描述了将使用 Keycloak (SAML) 的 Rancher 过渡到 Keycloak (OIDC) 的过程。
+
+### 重新配置 Keycloak
+
+1. 将现有客户端更改为使用 OIDC 协议。在 Keycloak 控制台中,单击 **Clients**,选择要迁移的 SAML 客户端,选择 **Settings** 选项卡,将 `Client Protocol` 从 `saml` 更改为 `openid-connect`,然后点击 **Save**。
+
+1. 验证 `Valid Redirect URIs` 是否仍然有效。
+
+1. 选择 **Mappers** 选项卡并使用以下设置创建一个新的 Mapper:
+
+ | 设置 | 值 |
+ ------------|------------
+ | `Name` | `Groups Mapper` |
+ | `Mapper Type` | `Group Membership` |
+ | `Token Claim Name` | `groups` |
+ | `Add to ID token` | `ON` |
+ | `Add to access token` | `ON` |
+ | `Add to user info` | `ON` |
+
+### 重新配置 Rancher
+
+在将 Rancher 配置为使用 Keycloak (OIDC) 之前,必须先禁用 Keycloak (SAML):
+
+1. 在 Rancher UI 中,单击 **☰ > 用户 & 认证**。
+1. 单击左侧导航栏的**认证**。
+1. 选择 **Keycloak (SAML)**。
+1. 单击**禁用**。
+
+按照[本节](#在-rancher-中配置-keycloak)中的步骤将 Rancher 配置为使用 Keycloak (OIDC)。
+
+:::note
+
+配置完成后,由于用户权限不会自动迁移,你需要重新申请 Rancher 用户权限。
+
+:::
+
+## 附录:故障排除
+
+如果你在测试与 Keycloak 服务器的连接时遇到问题,请先检查 OIDC 客户端的配置选项。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
+
+所有与 Keycloak 相关的日志条目都将添加 `[generic oidc]` 或 `[keycloak oidc]`。
+
+### 不能重定向到 Keycloak
+
+完成**配置 Keycloak OIDC 账号**表单并单击**启用**后,你没有被重定向到你的 IdP。
+
+* 验证你的 Keycloak 客户端配置。
+
+### 生成的 `Issuer` 和 `Auth 端点`不正确
+
+* 在**配置 Keycloak OIDC 账号**表单中,将**端点**更改为`指定(高级设置)`并覆盖`发行者` 和 `Auth 端点`的值。要查找这些值,前往 Keycloak 控制台并选择 **Realm Settings**,选择 **General** 选项卡,然后单击 **OpenID Endpoint Configuration**。JSON 输出将显示 `issuer` 和 `authorization_endpoint` 的值。
+
+### Keycloak 错误:"Invalid grant_type"
+
+* 在某些情况下,这条错误提示信息可能有误导性,实际上造成错误的原因是 `Valid Redirect URI` 配置错误。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-saml.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-saml.md
new file mode 100644
index 00000000000..145e327f3af
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-saml.md
@@ -0,0 +1,190 @@
+---
+title: 配置 Keycloak (SAML)
+description: 创建 Keycloak SAML 客户端并配置 Rancher 以使用 Keycloak。你的用户将能够使用他们的 Keycloak 登录名登录 Rancher。
+---
+
+如果你的组织使用 Keycloak Identity Provider (IdP) 进行用户身份验证,你可以通过配置 Rancher 来允许用户使用 IdP 凭证登录。
+
+## 先决条件
+
+- 你必须配置了 [Keycloak IdP 服务器](https://www.keycloak.org/guides#getting-started)。
+- 在 Keycloak 中,使用以下设置创建一个[新的 SAML 客户端](https://www.keycloak.org/docs/latest/server_admin/#saml-clients)。如需获取帮助,请参见 [Keycloak 文档](https://www.keycloak.org/docs/latest/server_admin/#saml-clients)。
+
+ | 设置 | 值 |
+ ------------|------------
+ | `Sign Documents` | `ON` 1 |
+ | `Sign Assertions` | `ON` 1 |
+ | 所有其他 `ON/OFF` 设置 | `OFF` |
+ | `Client ID` | 输入 `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata`,或在 Rancher Keycloak 配置2 中 `Entity ID` 字段的值。 |
+ | `Client Name` | (例如 `rancher`) |
+ | `Client Protocol` | `SAML` |
+ | `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` |
+
+ > 1 :可以选择启用这些设置中的一个或两个。
+ > 2 :在配置和保存 SAML 身份提供商之前,不会生成 Rancher SAML 元数据。
+
+ 
+
+- 在新的 SAML 客户端中,创建 Mappers 来公开用户字段。
+ - 添加所有 "Builtin Protocol Mappers"
+ 
+ - 创建一个 "Group list" mapper,来将成员属性映射到用户的组:
+ 
+
+## 获取 IDP 元数据
+
+
+
+
+要获取 IDP 元数据,请从 Keycloak 客户端导出 `metadata.xml` 文件。
+在**安装**选项卡中,选择**SAML 元数据 IDPSSODescriptor** 格式选项并下载你的文件。
+
+
+
+
+1. 在**配置**中,单击 **Realm 设置**选项卡。
+1. 点击**通用**选项卡。
+1. 在**端点**字段中,单击 **SAML 2.0 身份提供者元数据**。
+
+验证 IDP 元数据是否包含以下属性:
+
+```
+xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"
+xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
+xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
+```
+
+某些浏览器(例如 Firefox)可能会渲染/处理文档,使得内容看起来已被修改,并且某些属性看起来可能有缺失。在这种情况下,请使用通过浏览器找到的原始响应数据。
+
+以下是 Firefox 的示例流程,其他浏览器可能会略有不同:
+
+1. 按下 **F12** 访问开发者控制台。
+1. 点击 **Network** 选项卡。
+1. 从表中,单击包含 `descriptor` 的行。
+1. 在 details 窗格中,单击 **Response** 选项卡。
+1. 复制原始响应数据。
+
+获得的 XML 以 `EntitiesDescriptor` 作为根元素。然而,Rancher 希望根元素是 `EntityDescriptor` 而不是 `EntitiesDescriptor`。因此,在将这个 XML 传递给 Rancher 之前,请按照以下步骤调整:
+
+1. 将所有不存在的属性从 `EntitiesDescriptor` 复制到 `EntityDescriptor`。
+1. 删除开头的 `EntitiesDescriptor` 标签。
+1. 删除 XML 末尾的 `</EntitiesDescriptor>`。
+
+最后的代码会是如下:
+
+```
+<EntityDescriptor xmlns="urn:oasis:names:tc:SAML:2.0:metadata" ...>
+....
+</EntityDescriptor>
+```
+
+
+
+
+1. 在**配置**中,单击 **Realm 设置**选项卡。
+1. 点击**通用**选项卡。
+1. 在**端点**字段中,单击 **SAML 2.0 身份提供者元数据**。
+
+验证 IDP 元数据是否包含以下属性:
+
+```
+xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"
+xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
+xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
+```
+
+某些浏览器(例如 Firefox)可能会渲染/处理文档,使得内容看起来已被修改,并且某些属性看起来可能有缺失。在这种情况下,请使用通过浏览器找到的原始响应数据。
+
+以下是 Firefox 的示例流程,其他浏览器可能会略有不同:
+
+1. 按下 **F12** 访问开发者控制台。
+1. 点击 **Network** 选项卡。
+1. 从表中,单击包含 `descriptor` 的行。
+1. 在 details 窗格中,单击 **Response** 选项卡。
+1. 复制原始响应数据。
+
+
+
+
+## 在 Rancher 中配置 Keycloak
+
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **Keycloak SAML**。
+1. 填写**配置 Keycloak 账号**表单。有关填写表单的帮助,请参见[配置参考](#配置参考)。
+1. 完成**配置 Keycloak 账号**表单后,单击**启用**。
+
+ Rancher 会将你重定向到 IdP 登录页面。输入使用 Keycloak IdP 进行身份验证的凭证,来验证你的 Rancher Keycloak 配置。
+
+ :::note
+
+ 你可能需要禁用弹出窗口阻止程序才能看到 IdP 登录页面。
+
+ :::
+
+**结果**:已将 Rancher 配置为使用 Keycloak。你的用户现在可以使用 Keycloak 登录名登录 Rancher。
+
+:::note SAML 身份提供商注意事项
+
+- SAML 协议不支持搜索或查找用户或组。因此,将用户或组添加到 Rancher 时不会对其进行验证。
+- 添加用户时,必须正确输入确切的用户 ID(即 `UID` 字段)。键入用户 ID 时,将不会搜索可能匹配的其他用户 ID。
+- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
+- 用户组下拉列表仅显示你所属的用户组。如果你不是某个组的成员,你将无法添加该组。
+
+:::
+
+## 配置参考
+
+| 字段 | 描述 |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 显示名称字段 | 包含用户显示名称的属性。 示例:`givenName` |
+| 用户名字段 | 包含用户名/给定名称的属性。 示例:`email` |
+| UID 字段 | 每个用户独有的属性。 示例:`email` |
+| 用户组字段 | 创建用于管理组成员关系的条目。 示例:`member` |
+| Entity ID 字段 | Keycloak 客户端中需要配置为客户端的 ID。 默认值:`https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` |
+| Rancher API 主机 | Rancher Server 的 URL。 |
+| 私钥/证书 | 在 Rancher 和你的 IdP 之间创建安全外壳(SSH)的密钥/证书对。 |
+| IDP 元数据 | 从 IdP 服务器导出的 `metadata.xml` 文件。 |
+
+:::tip
+
+你可以使用 openssl 命令生成一个密钥/证书对。例如:
+
+openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert
+
+:::
+
+## 附录:故障排除
+
+如果你在测试与 Keycloak 服务器的连接时遇到问题,请先检查 SAML 客户端的配置选项。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)。
+
+### 不能重定向到 Keycloak
+
+点击**使用 Keycloak 认证**时,没有重定向到你的 IdP。
+
+* 验证你的 Keycloak 客户端配置。
+* 确保 `Force Post Binding` 设为 `OFF`。
+
+
+### IdP 登录后显示禁止消息
+
+你已正确重定向到你的 IdP 登录页面,并且可以输入凭证,但是之后收到 `Forbidden` 消息。
+
+* 检查 Rancher 调试日志。
+* 如果日志显示 `ERROR: either the Response or Assertion must be signed`,确保 `Sign Documents` 或 `Sign assertions` 在 Keycloak 客户端中设置为 `ON`。
+
+### 访问 `/v1-saml/keycloak/saml/metadata` 时返回 HTTP 502
+
+常见原因:配置 SAML 提供商之前未创建元数据。
+尝试配置 Keycloak,并将它保存为你的 SAML 提供商,然后访问元数据。
+
+### Keycloak 错误:"We're sorry, failed to process response"
+
+* 检查你的 Keycloak 日志。
+* 如果日志显示 `failed: org.keycloak.common.VerificationException: Client does not have a public key`,请在 Keycloak 客户端中将 `Encrypt Assertions` 设为 `OFF`。
+
+### Keycloak 错误:"We're sorry, invalid requester"
+
+* 检查你的 Keycloak 日志。
+* 如果日志显示 `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`,请在 Keycloak 客户端中将 `Client Signature Required` 设为 `OFF`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
new file mode 100644
index 00000000000..212f7cf45d1
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-okta-saml.md
@@ -0,0 +1,107 @@
+---
+title: 配置 Okta (SAML)
+---
+
+如果你的组织使用 Okta Identity Provider (IdP) 进行用户身份验证,你可以通过配置 Rancher 来允许用户使用 IdP 凭证登录。
+
+:::note
+
+Okta 集成仅支持服务提供商发起的登录。
+
+:::
+## 先决条件
+
+在 Okta 中,使用以下设置创建一个新的 SAML 应用。如需获取帮助,请参见 [Okta 文档](https://developer.okta.com/standards/SAML/setting_up_a_saml_application_in_okta)。
+
+| 设置 | 值 |
+------------|------------
+| `Single Sign on URL` | `https://yourRancherHostURL/v1-saml/okta/saml/acs` |
+| `Audience URI (SP Entity ID)` | `https://yourRancherHostURL/v1-saml/okta/saml/metadata` |
+
+## 在 Rancher 中配置 Okta
+
+你可以将 Okta 与 Rancher 集成,以便经过身份认证的用户通过组权限访问 Rancher 资源。Okta 会返回一个对用户进行身份认证的 SAML 断言,包括用户所属的组。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **Okta**。
+1. 填写**配置 Okta 账号**表单。下面的示例描述了如何将 Okta 属性从属性语句映射到 Rancher 中的字段:
+
+ | 字段 | 描述 |
+ | ------------------------- | ----------------------------------------------------------------------------- |
+ | 显示名称字段 | 属性语句中包含用户显示名称的属性名称。 |
+ | 用户名字段 | 属性语句中包含用户名/给定名称的属性名称。 |
+ | UID 字段 | 属性语句中每个用户唯一的属性名称。 |
+ | 用户组字段 | 组属性语句中公开你的组的属性名称。 |
+ | Rancher API 主机 | Rancher Server 的 URL。 |
+ | 私钥/证书 | 密钥/证书对,用于断言加密。 |
+ | 元数据 XML | 你在应用 `Sign On` 部分中找到的 `Identity Provider metadata` 文件。 |
+
+ :::tip
+
+ 你可以使用 openssl 命令生成一个密钥/证书对。例如:
+
+ ```
+ openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.crt
+ ```
+
+ :::
+
+
+1. 完成**配置 Okta 账号**表单后,单击**启用**。
+
+ Rancher 会将你重定向到 IdP 登录页面。输入使用 Okta IdP 进行身份验证的凭证,来验证你的 Rancher Okta 配置。
+
+ :::note
+
+ 如果什么都没有发生,很可能是因为你的浏览器阻止了弹出窗口。请在弹出窗口阻止程序中禁用 Rancher 域,并在其他类似扩展中将 Rancher 列入白名单。
+
+ :::
+
+**结果**:已将 Rancher 配置为使用 Okta。你的用户现在可以使用 Okta 登录名登录 Rancher。
+
+:::note SAML 身份提供商注意事项
+
+如果你在没有 OpenLDAP 的情况下配置 Okta,你将无法搜索或直接查找用户或组。相关的警告如下:
+
+- 在 Rancher 中为用户和组分配权限时将不会验证用户和组。
+- 添加用户时,必须正确输入确切的用户 ID(即 `UID` 字段)。键入用户 ID 时,将不会搜索可能匹配的其他用户 ID。
+- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
+- 用户组下拉列表仅显示你所属的用户组。如果你不是某个组的成员,你将无法添加该组。
+
+:::
+
+## Okta 与 OpenLDAP 搜索
+
+你可以添加 OpenLDAP 后端来协助用户和组搜索。Rancher 将显示来自 OpenLDAP 服务的其他用户和组。这允许你将权限分配给用户不属于的组。
+
+### OpenLDAP 先决条件
+
+如果你使用 Okta 作为 IdP,你可以[设置 LDAP 接口](https://help.okta.com/en-us/Content/Topics/Directory/LDAP-interface-main.htm)以供 Rancher 使用。你还可以配置外部 OpenLDAP Server。
+
+你必须使用 LDAP 绑定帐户(也称为 ServiceAccount)来配置 Rancher,以便搜索和检索应具有访问权限的用户和组的 LDAP 条目。不要使用管理员帐户或个人帐户作为 LDAP 绑定帐户。在 OpenLDAP 中[创建](https://help.okta.com/en-us/Content/Topics/users-groups-profiles/usgp-add-users.htm)一个专用帐户,对搜索库下的用户和组具有只读访问权限。
+
+:::warning 安全注意事项
+
+OpenLDAP ServiceAccount 用于所有搜索。无论用户个人的 SAML 权限是什么,Rancher 用户都将看到 OpenLDAP ServiceAccount 可以查看的用户和组。
+
+:::
+
+
+> **使用 TLS?**
+>
+> 如果 OpenLDAP Server 使用的证书是自签名的或来自无法识别的证书颁发机构,则 Rancher 需要 PEM 格式的 CA 证书(包含所有中间证书)。你需要在配置期间提供此证书,以便 Rancher 能够验证证书链。
+
+### 在 Rancher 中配置 OpenLDAP
+
+[配置 OpenLDAP Server、组和用户的设置](../configure-openldap/openldap-config-reference.md)。请注意,不支持嵌套组成员。
+
+> 在继续配置之前,请熟悉[外部身份验证配置和用户主体](../../../../pages-for-subheaders/authentication-config.md#外部身份验证配置和用户主体)。
+
+1. 使用分配了 [administrator](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions) 角色(即 _本地主体_)的本地用户登录到 Rancher。
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **Okta**,如果已配置 SAML,则单击**编辑配置**。
+1. 在**用户和组搜索**下,选中**配置 OpenLDAP Server**。
+
+如果你在测试与 OpenLDAP Server 的连接时遇到问题,请确保你输入了 ServiceAccount 的凭证并正确配置了搜索库。你可以检查 Rancher 日志来查明根本原因。调试日志可能包含有关错误的更详细信息。请参阅[如何启用调试日志](../../../../faq/technical-items.md#如何启用调试日志记录)了解更多信息。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-pingidentity.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-pingidentity.md
new file mode 100644
index 00000000000..0eebb8363c9
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-pingidentity.md
@@ -0,0 +1,62 @@
+---
+title: 配置 PingIdentity (SAML)
+---
+
+如果你的组织使用 Ping Identity Provider (IdP) 进行用户身份验证,你可以通过配置 Rancher 来允许用户使用 IdP 凭证登录。
+
+> **先决条件**:
+>
+> - 你必须配置了 [Ping IdP 服务器](https://www.pingidentity.com/)。
+> - 以下是 Rancher Service Provider 配置所需的 URL:
+> 元数据 URL:`https://<RANCHER_SERVER>/v1-saml/ping/saml/metadata`
+> 断言使用者服务 (ACS) URL:`https://<RANCHER_SERVER>/v1-saml/ping/saml/acs`
+> 请注意,在 Rancher 中保存验证配置之前,这些 URL 不会返回有效数据。
+> - 从 IdP 服务器导出 `metadata.xml` 文件。详情请参见 [PingIdentity 文档](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html)。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **Ping Identity**。
+1. 填写**配置 Ping 账号**表单。Ping IdP 允许你指定要使用的数据存储。你可以添加数据库或使用现有的 ldap 服务器。例如,如果你选择 Active Directory (AD) 服务器,下面的示例将描述如何将 AD 属性映射到 Rancher 中的字段:
+
+ 1. **显示名称字段**:包含用户显示名称的 AD 属性(例如:`displayName`)。
+
+ 1. **用户名字段**:包含用户名/给定名称的 AD 属性(例如:`givenName`)。
+
+ 1. **UID 字段**:每个用户唯一的 AD 属性(例如:`sAMAccountName`、`distinguishedName`)。
+
+ 1. **用户组字段**: 创建用于管理组成员关系的条目(例如:`memberOf`)。
+
+ 1. **Entity ID 字段**(可选):你的合作伙伴已公布的、依赖协议的、唯一的标识符。该 ID 将你的组织定义为将服务器用于 SAML 2.0 事务的实体。这个 ID 可以通过带外传输或 SAML 元数据文件获得。
+
+ 1. **Rancher API 主机**:你的 Rancher Server 的 URL。
+
+ 1. **私钥**和**证书**:密钥/证书对,用于在 Rancher 和你的 IdP 之间创建一个安全外壳(SSH)。
+
+ 你可以使用 openssl 命令进行创建。例如:
+
+ ```
+ openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
+ ```
+ 1. **IDP 元数据**:[从 IdP 服务器导出的 `metadata.xml` 文件](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html)。
+
+
+1. 完成**配置 Ping 账号**表单后,单击**启用**。
+
+ Rancher 会将你重定向到 IdP 登录页面。输入使用 Ping IdP 进行身份验证的凭证,来验证你的 Rancher PingIdentity 配置。
+
+ :::note
+
+ 你可能需要禁用弹出窗口阻止程序才能看到 IdP 登录页面。
+
+ :::
+
+**结果**:已将 Rancher 配置为使用 PingIdentity。你的用户现在可以使用 PingIdentity 登录名登录 Rancher。
+
+:::note SAML 身份提供商注意事项
+
+- SAML 协议不支持搜索或查找用户或组。因此,将用户或组添加到 Rancher 时不会对其进行验证。
+- 添加用户时,必须正确输入确切的用户 ID(即 `UID` 字段)。键入用户 ID 时,将不会搜索可能匹配的其他用户 ID。
+- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
+- 用户组下拉列表仅显示你所属的用户组。如果你不是某个组的成员,你将无法添加该组。
+
+:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/create-local-users.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/create-local-users.md
new file mode 100644
index 00000000000..e6589e67693
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/create-local-users.md
@@ -0,0 +1,15 @@
+---
+title: 本地身份验证
+---
+
+在配置外部验证提供程序之前,你将默认使用本地身份验证。Rancher 将用户帐户信息(例如用户名和密码)存储在本地。默认情况下,用于首次登录 Rancher 的 `admin` 用户就是一个本地用户。
+
+## 添加本地用户
+
+无论是否使用外部身份验证服务,你都应创建一些本地身份认证的用户,以便在外部验证服务遇到问题时继续使用 Rancher。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**用户**。
+1. 单击**创建**。
+1. 完成**添加用户**的表单。
+1. 单击**创建**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
new file mode 100644
index 00000000000..0fe0a77cebb
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md
@@ -0,0 +1,77 @@
+---
+title: 用户和组
+---
+
+Rancher 依赖用户和组来决定允许登录到 Rancher 的用户,以及他们可以访问哪些资源。你配置外部身份验证提供程序后,该提供程序的用户将能够登录到你的 Rancher Server。用户登录时,验证提供程序将向你的 Rancher Server 提供该用户所属的组列表。
+
+你可以通过向资源添加用户或组,来控制其对集群、项目、全局 DNS 提供程序和相关资源的访问。将组添加到资源时,身份验证提供程序中属于该组的所有用户都将能够使用组的权限访问该资源。有关角色和权限的更多信息,请参见 [RBAC](../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md)。
+
+## 管理成员
+
+向资源添加用户或用户组时,你可以通过输入用户或组的名称来搜索用户或组。Rancher Server 会查询身份验证提供程序,来查找与你输入的内容匹配的用户和组。搜索仅限于你登录时使用的身份验证提供程序。例如,如果你启用了 GitHub 身份验证,但使用[本地](create-local-users.md)用户登录,则无法搜索 GitHub 用户或组。
+
+你可以查看和管理所有用户,包括本地用户和来自身份验证提供程序的用户。在左上角,单击 **☰ > 用户 & 认证**。在左侧导航栏中单击**用户**。
+
+:::note SAML 身份提供商注意事项
+
+- SAML 协议不支持搜索或查找用户或组。因此,将用户或组添加到 Rancher 时不会对其进行验证。
+- 添加用户时,必须正确输入确切的用户 ID(即 `UID` 字段)。键入用户 ID 时,将不会搜索可能匹配的其他用户 ID。
+- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
+- 用户组下拉列表仅显示你所属的用户组。如果你不是某个组的成员,你将无法添加该组。
+
+:::
+
+## 用户信息
+
+Rancher 会维护通过身份验证提供程序登录的每个用户的信息,包括用户是否允许访问 Rancher Server,以及用户所属的组的列表。Rancher 保留此用户信息,以便 CLI、API 和 kubectl 能够准确地反映用户基于身份验证提供程序中的组成员关系的访问。
+
+当用户使用身份验证提供程序登录到 UI 时,Rancher 将自动更新该用户信息。
+
+### 自动刷新用户信息
+
+Rancher 会定期刷新用户信息,甚至在用户通过 UI 登录之前也是如此。你可以控制 Rancher 执行刷新的频率。
+
+有两个参数可以控制这个操作:
+
+- **`auth-user-info-max-age-seconds`**
+
+ 此设置控制用户信息的最大老化时间,如果超过这个时间,Rancher 就会刷新信息。如果用户进行 API 调用(直接 UI 访问或通过使用 Rancher CLI 或 kubectl 调用),并且与 Rancher 上次刷新用户信息的时间间隔大于此设置,则 Rancher 将触发刷新。此设置默认为 `3600` 秒,即 1 小时。
+
+- **`auth-user-info-resync-cron`**
+
+ 此设置控制用于所有用户重新同步身份验证提供程序信息的定期任务周期。无论用户最近是否登录或使用 API,自动刷新任务都会按照指定的时间间隔刷新用户信息。此设置默认为 `0 0 * * *`,即每天午夜进行一次。有关此设置的有效值的更多信息,请参见 [Cron 文档](https://en.wikipedia.org/wiki/Cron)。
+
+如果需要更改设置:
+
+1. 在左上角,单击 **☰ > 全局设置**。
+1. 前往你需要配置的设置,并点击 **⋮ > 编辑设置**。
+
+:::note
+
+由于 SAML 不支持用户查找,因此基于 SAML 的身份验证提供程序不支持定期刷新用户信息。只有当用户登录到 Rancher UI 时,才会刷新用户信息。
+
+:::
+### 手动刷新用户信息
+
+如果你不确定 Rancher 上一次执行用户信息自动刷新的时间,则可以通过手动刷新来刷新所有用户的信息。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在**用户**页面,单击**刷新用户组成员名单**。
+
+**结果**:Rancher 刷新了所有用户的信息。请求此刷新将更新哪些用户可以访问 Rancher 以及每个用户所属的所有组。
+
+:::note
+
+由于 SAML 不支持用户查找,因此基于 SAML 的身份验证提供程序不支持手动刷新用户信息。只有当用户登录到 Rancher UI 时,才会刷新用户信息。
+
+:::
+
+## 会话周期
+
+用户会话的默认生命周期(TTL)是可调的。默认的会话周期是 16 小时。
+
+1. 在左上角,单击 **☰ > 全局设置**。
+1. 前往 **`auth-user-session-ttl-minutes`** 并单击**⋮ > 编辑设置**。
+1. 输入会话应该持续的时间(以分钟为单位),然后单击**Save**。
+
+**结果**:用户的 Rancher 登录会话在设定的分钟数后自动退出。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md
new file mode 100644
index 00000000000..5838ed66fed
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-permissions-and-global-configuration.md
@@ -0,0 +1,85 @@
+---
+title: 认证、权限和全局配置
+---
+
+
+
+
+
+安装完成后,[系统管理员](manage-role-based-access-control-rbac/global-permissions.md) 应该通过 Rancher 配置认证、授权、安全性、默认设置、安全策略、驱动和全局 DNS 条目。
+
+## 首次登录
+
+首次登录 Rancher 后,Rancher 会提示你输入 **Rancher Server URL**,你应该将 URL 设置为访问 Rancher Server 的主入口点。当负载均衡器运行在 Rancher Server 集群前面时,URL 应该设置为负载均衡地址。系统会自动尝试根据运行 Rancher Server 的主机 IP 地址或主机名推断 Rancher Server URL,但只有当 Rancher Server 以单节点方式安装时才有效。因此在大多数情况下,你都需要将 Rancher Server URL 设置为正确的值。
+
+:::danger
+
+当设置完 Rancher Server URL 后,我们不支持修改它。请格外小心地设置此项配置。
+
+:::
+
+## 认证
+
+Rancher 为 Kubernetes 增加的一项关键特性是集中式的用户认证。此特性允许设置本地用户和/或连接到外部认证程序。通过连接到外部认证程序,你可以使用该程序提供的用户和组。
+
+更多关于认证的工作原理以及如何配置对接各个认证程序,请参考[认证](authentication-config/authentication-config.md)。
+
+## 授权
+
+在 Rancher 中,每个人都是以 _用户_ 的身份进行鉴权,这是一个授予你访问 Rancher 的登录身份。用户登录 Rancher 后,他们的 _授权_ 或者他们在系统中的访问权限由用户的角色决定。Rancher 提供了内置的角色,允许你轻松地配置用户对资源的权限,但是 Rancher 还提供了为每个 Kubernetes 资源自定义角色的功能。
+
+更多关于授权的工作原理以及自定义角色的使用,请参考 [RBAC](manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md)。
+
+## Pod 安全策略
+
+_Pod 安全策略_ (或 PSPs) 是控制 Pod 安全敏感方面规范的对象,例如 root 权限。如果一个 Pod 不满足 PSP 中指定的条件,Kubernetes 将不允许 Pod 启动,同时 Rancher 会显示一条错误信息。
+
+更多关于如何创建和使用 PSPs 的内容,请参考 [Pod 安全策略](create-pod-security-policies.md)。
+
+## Provisioning Drivers
+
+Rancher 中的驱动允许你管理哪些程序可以预置[托管的 Kubernetes 集群](../kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md) 或 [云服务器节点](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md),允许 Rancher 部署和管理 Kubernetes。
+
+更多信息请参考 [Provisioning Drivers](about-provisioning-drivers/about-provisioning-drivers.md)。
+
+## 添加 Kubernetes 版本到 Rancher 中
+
+使用此功能,你可以在最新版本的 Kubernetes 发布后立即升级,而不需要升级 Rancher。此功能允许你轻松升级 Kubernetes 的补丁版本(例如 `v1.15.X`),但不打算升级 Kubernetes 的次要版本(例如 `v1.X.0`),因为 Kubernetes 倾向于在次要版本之间弃用或添加 API。
+
+Rancher 用于配置 [RKE 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md) 的信息现在存储于 Rancher Kubernetes 元数据中,更多关于元数据的配置以及如何更改用于配置 RKE 集群的 Kubernetes 版本的信息,请参考 [Rancher Kubernetes 元数据](../../../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md)。
+
+Rancher Kubernetes 元数据包含 Kubernetes 版本信息,Rancher 使用这些信息来配置 [RKE 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)。
+
+关于元数据的工作原理以及如何配置元数据,请参考 [Rancher Kubernetes 元数据](../../../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md)。
+
+## 全局设置
+
+控制某些全局级别 Rancher 配置项可以在顶部的导航栏中找到。
+
+点击左上角的 **☰** ,然后选择 **全局设置**,查看和配置以下设置:
+
+- **设置**: 各种 Rancher 默认值,例如用户密码的最小长度 (`password-min-length`)。在修改这些设置项时应该谨慎,因为设置无效的值可能会破坏 Rancher 的安装。
+- **功能开关**: 可以打开或关闭 Rancher 的某些功能,一些标志用于[实验性功能](#启用实验性功能)。
+- **横幅**: 可以添加到门户上固定位置的元素,例如你可以使用这些选项在用户登录 Rancher 时为他们设置[自定义的横幅](custom-branding.md#固定横幅)。
+- **品牌**: 你可以[自定义](custom-branding.md) Rancher UI 的设计元素,你可以增加一个自定义的 logo 或 favicon,也可以修改 UI 的颜色。
+- **性能**: Rancher UI 的性能设置,例如增量资源加载。
+- **主页链接**: Rancher UI **主页**页面上显示的链接,你可以修改默认链接的可见性或者增加自己的链接。
+
+### 启用实验性功能
+
+Rancher 包含一些默认处于实验性和/或禁用的功能,功能开关允许你启用这些特性。更多信息请参考[功能开关](../../advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
+
+### 全局配置
+
+仅在激活 **legacy** [功能开关](../../advanced-user-guides/enable-experimental-features/enable-experimental-features.md) 时才可以看见**全局配置**选项。在 v2.6 及更新版本新安装的 Rancher 已经默认禁用了 **legacy** 特性。如果你是从早期的 Rancher 版本升级,或者在 Rancher v2.6 及更新版本上启用了 **legacy** 特性,顶部导航菜单中将会显示**全局配置**:
+
+1. 点击左上角的 **☰**。
+1. 在 **旧版应用** 中选择 **全局配置**。
+
+**全局配置**提供以下功能:
+
+- **应用商店**
+- **全局 DNS 条目**
+- **全局 DNS 提供商**
+
+由于这些是旧版特性,请参考 Rancher v2.0-v2.4 的[应用商店](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md), [全局 DNS 条目](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#adding-a-global-dns-entry), 以及 [全局 DNS 提供商](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md#editing-a-global-dns-provider)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-microsoft-ad-federation-service-saml.md
similarity index 60%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-microsoft-ad-federation-service-saml.md
index 9cb1a64ca66..3b96b5f62ff 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-microsoft-ad-federation-service-saml.md
@@ -2,14 +2,18 @@
title: 配置 Microsoft AD FS (SAML)
---
-如果你的组织使用 Microsoft 联合身份验证服务 (AD FS) 进行用户身份验证,你可以通过配置 Rancher 来允许用户使用 AD FS 凭证登录。
+
+
+
+
+如果你的组织使用 Active Directory Federation Service (AD FS) 进行用户身份认证,你可以通过配置 Rancher 来允许用户使用 AD FS 凭证登录。
## 先决条件
已安装 Rancher。
- 获取你的 Rancher Server URL。配置 AD FS 时,请使用该 URL 替换 `` 占位符。
-- 你的 Rancher 必须具有全局管理员账号。
+- 你必须在 Rancher 安装时具有全局管理员账号。
你必须配置 [Microsoft AD FS 服务器](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services)。
@@ -18,10 +22,10 @@ title: 配置 Microsoft AD FS (SAML)
## 配置概要
-要让 Rancher Server 使用 Microsoft AD FS,你需要在 Active Directory 服务器上配置 AD FS,并将 Rancher 配置为使用 AD FS 服务器。如果需要获取在 Rancher 中设置 Microsoft AD FS 身份验证的指南,请参见:
+要让 Rancher Server 使用 Microsoft AD FS,你需要在 Active Directory 服务器上配置 AD FS,并将 Rancher 配置为使用 AD FS 服务器。如果需要获取在 Rancher 中设置 Microsoft AD FS 身份认证的指南,请参见:
-- [1. 在 Microsoft AD FS 中配置 Rancher](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md)
-- [2. 在 Rancher 中配置 Microsoft AD FS](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md)
+- [1. 在 Microsoft AD FS 中配置 Rancher](configure-ms-adfs-for-rancher.md)
+- [2. 在 Rancher 中配置 Microsoft AD FS](configure-rancher-for-ms-adfs.md)
:::note SAML 身份提供商注意事项
@@ -32,7 +36,4 @@ title: 配置 Microsoft AD FS (SAML)
:::
-
-### 后续操作
-
-[在 Microsoft AD FS 中配置 Rancher](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md)
+### [后续操作:在 Microsoft AD FS 中配置 Rancher](configure-ms-adfs-for-rancher.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
new file mode 100644
index 00000000000..83bcfee396b
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md
@@ -0,0 +1,88 @@
+---
+title: 1. 在 Microsoft AD FS 中配置 Rancher
+---
+
+
+
+
+
+在配置 Rancher 以支持 Active Directory Federation Service (AD FS) 之前,你必须在 AD FS 中将 Rancher 添加为 [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts)(信赖方信任)。
+
+1. 以管理用户身份登录 AD 服务器。
+
+1. 打开 **AD FS Management** 控制台。在 **Actions** 菜单中选择 **Add Relying Party Trust...**。然后单击 **Start**。
+
+ 
+
+1. 选择 **Enter data about the relying party manually** 作为获取信赖方数据的选项。
+
+ 
+
+1. 为 **Relying Party Trust** 设置**显示名称**,例如 `Rancher`。
+
+ 
+
+1. 选择 **AD FS profile** 作为信赖方信任的配置文件。
+
+ 
+
+1. 留空 **optional token encryption certificate**,因为 Rancher AD FS 不会使用它。
+
+ 
+
+1. 选择 **Enable support for the SAML 2.0 WebSSO protocol** 并在 Service URL 处输入 `https://<RANCHER_SERVER>/v1-saml/adfs/saml/acs`。
+
+ 
+
+1. 将 `https://<RANCHER_SERVER>/v1-saml/adfs/saml/metadata` 添加为 **Relying party trust identifier**。
+
+ 
+
+1. 本教程不涉及多重身份认证。如果你想配置多重身份认证,请参见 [Microsoft 文档](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs)。
+
+ 
+
+1. 在 **Choose Issuance Authorization Rules** 中,你可以根据用例选择任何一个可用选项。但是考虑到本指南的目的,请选择 **Permit all users to access this relying party**。
+
+ 
+
+1. 检查所有设置后,选择 **Next** 来添加信赖方信任。
+
+ 
+
+1. 选择 **Open the Edit Claim Rules...**。然后单击 **Close**。
+
+ 
+
+1. 在 **Issuance Transform Rules** 选项卡中,单击 **Add Rule...**。
+
+ 
+
+1. 在 **Claim rule template** 中选择 **Send LDAP Attributes as Claims**。
+
+ 
+
+1. 将 **Claim rule name** 设置为所需的名称(例如 `Rancher Attributes`)并选择 **Active Directory** 作为 **Attribute store**。创建对应下表的映射:
+
+ | LDAP 属性 | 传出声明类型 |
+ | -------------------------------------------- | ------------ |
+ | Given-Name | Given Name |
+ | User-Principal-Name | UPN |
+ | Token-Groups - Qualified by Long Domain Name | Group |
+   | SAM-Account-Name                             | Name         |
+
+
+
+ 
+
+1. 从 AD 服务器的以下位置下载 `federationmetadata.xml`:
+
+```
+https://<AD_SERVER>/federationmetadata/2007-06/federationmetadata.xml
+```
+
+**结果**:你已将 Rancher 添加为信赖方信任。现在你可以配置 Rancher 来使用 AD FS。
+
+### 后续操作
+
+[在 Rancher 中配置 Microsoft AD FS](configure-rancher-for-ms-adfs.md)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
new file mode 100644
index 00000000000..26e89e2058d
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md
@@ -0,0 +1,57 @@
+---
+title: 2. 在 Rancher 中配置 Microsoft AD FS
+---
+
+
+
+
+
+完成[在 Microsoft AD FS 中配置 Rancher](configure-ms-adfs-for-rancher.md) 后,将你的 Active Directory Federation Service (AD FS) 信息输入 Rancher,以便 AD FS 用户可以通过 Rancher 进行身份认证。
+
+:::note 配置 AD FS 服务器的重要说明:
+
+- SAML 2.0 WebSSO 协议服务 URL 为:`https://<RANCHER_SERVER>/v1-saml/adfs/saml/acs`
+- 信赖方信任标识符 URL 为:`https://<RANCHER_SERVER>/v1-saml/adfs/saml/metadata`
+- 你必须从 AD FS 服务器导出 `federationmetadata.xml` 文件。你可以在 `https://<AD_SERVER>/federationmetadata/2007-06/federationmetadata.xml` 中找到该文件。
+
+:::
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏,单击**认证**。
+1. 单击 **ADFS**。
+1. 填写**配置 AD FS 账号**表单。Microsoft AD FS 允许你指定现有的 Active Directory (AD) 服务器。[以下配置示例](#配置)描述了如何将 AD 属性映射到 Rancher 中的字段。
+1. 完成**配置 AD FS 账号**表单后,单击**启用**。
+
+ Rancher 会将你重定向到 AD FS 登录页面。输入使用 Microsoft AD FS 进行身份验证的凭证,来验证你的 Rancher AD FS 配置。
+
+ :::note
+
+ 你可能需要禁用弹出窗口阻止程序才能看到 AD FS 登录页面。
+
+ :::
+
+**结果**:已将 Rancher 配置为使用 AD FS。你的用户现在可以使用 AD FS 登录名登录 Rancher。
+
+## 配置
+
+| 字段 | 描述 |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| 显示名称字段 | 包含用户显示名称的 AD 属性。 示例:`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` |
+| 用户名字段 | 包含用户名/给定名称的 AD 属性。 示例:`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` |
+| UID 字段 | 每个用户独有的 AD 属性。 示例:`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` |
+| 用户组字段 | 创建用于管理组成员关系的条目。 示例:`http://schemas.xmlsoap.org/claims/Group` |
+| Rancher API 主机 | Rancher Server 的 URL。 |
+| 私钥/证书 | 在 Rancher 和你的 AD FS 之间创建安全外壳(SSH)的密钥/证书对。确保将 Common Name (CN) 设置为 Rancher Server URL。 [证书创建命令](#cert-command) |
+| 元数据 XML | 从 AD FS 服务器导出的 `federationmetadata.xml` 文件。 你可以在 `https://<AD_SERVER>/federationmetadata/2007-06/federationmetadata.xml` 找到该文件。 |
+
+
+
+:::tip
+
+你可以使用 openssl 命令生成证书。例如:
+
+```
+openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
+```
+
+:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-openldap.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
similarity index 58%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-openldap.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
index 5b8f3d014d9..7594371a296 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-openldap.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/configure-openldap.md
@@ -2,34 +2,38 @@
title: 配置 OpenLDAP
---
-如果你的组织使用 LDAP 进行用户身份验证,则可以配置 Rancher 与 OpenLDAP 服务器通信,从而对用户进行身份验证。这使 Rancher 管理员可以对外部用户系统中的用户和组进行集群和项目的访问控制,同时允许最终用户在登录 Rancher UI 时使用 LDAP 凭证进行身份验证。
+
+
+
+
+如果你的组织使用 LDAP 进行认证,则可以配置 Rancher 与 OpenLDAP 服务器通信以对用户进行认证。这时 Rancher 管理员可以对外部用户系统中的用户和组进行集群和项目的访问控制,同时允许终端用户在登录 Rancher UI 时使用其 LDAP 凭据进行身份认证。
## 先决条件
必须为 Rancher 配置 LDAP 绑定账号(即 ServiceAccount),来搜索和检索应该具有访问权限的用户和组的 LDAP 条目。建议不要使用管理员账号或个人账号,而应在 OpenLDAP 中创建一个专用账号,该账号对配置的搜索库下的用户和组需要具有只读权限(参见下文)。
-> **使用 TLS?**
+> **使用 TLS?**
>
> 如果 OpenLDAP 服务器使用的证书是自签名的或不是来自认可的证书颁发机构,请确保手头有 PEM 格式的 CA 证书(包含所有中间证书)。你必须在配置期间粘贴此证书,以便 Rancher 能够验证证书链。
## 在 Rancher 中配置 OpenLDAP
-配置 OpenLDAP 服务器,组和用户的设置。有关填写每个字段的帮助,请参见[配置参考](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/openldap-config-reference.md)。
+配置 OpenLDAP 服务器,组和用户的设置。有关填写每个字段的帮助,请参见[配置参考](openldap-config-reference.md)。
-> 在开始之前,请熟悉[外部身份验证配置和主体用户](authentication-config.md#外部身份验证配置和用户主体)的概念。
+> 在开始之前,请熟悉[外部认证配置和用户主体](../authentication-config/authentication-config.md#外部认证配置和用户主体)的概念。
1. 在左上角,单击 **☰ > 用户 & 认证**。
1. 在左侧导航栏,单击**认证**。
1. 单击 **OpenLDAP**。填写**配置 OpenLDAP 服务器**表单。
1. 点击**启用**。
-### 测试身份验证
+### 测试认证
-完成配置后,请测试与 OpenLDAP 服务器的连接。如果测试成功,则表明 OpenLDAP 身份验证已启用。
+完成配置后,请测试与 OpenLDAP 服务器的连接。如果测试成功,则表明 OpenLDAP 认证已启用。
:::note
-与此步骤中输入的凭证相关的 OpenLDAP 用户将映射到本地主体账号,并在 Rancher 中分配系统管理员权限。因此,你应该决定使用哪个 OpenLDAP 账号来执行此步骤。
+于此步骤中输入的 OpenLDAP 用户凭证将映射到本地主体账号,并在 Rancher 中分配系统管理员权限。因此,你应该决定使用哪个 OpenLDAP 账号来执行此步骤。
:::
@@ -38,7 +42,7 @@ title: 配置 OpenLDAP
**结果**:
-- OpenLDAP 验证配置成功。
+- OpenLDAP 认证配置成功。
- 与输入凭证对应的 LDAP 用户被映射到本地主体(管理员)账号。
:::note
@@ -49,4 +53,4 @@ title: 配置 OpenLDAP
## 附录:故障排除
-如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../faq/technical-items.md#如何启用调试日志记录)。
+如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#how-can-i-enable-debug-logging)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/openldap-config-reference.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/openldap-config-reference.md
new file mode 100644
index 00000000000..24c338fe708
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/openldap-config-reference.md
@@ -0,0 +1,81 @@
+---
+title: OpenLDAP 配置参考
+---
+
+
+
+
+
+有关配置 OpenLDAP 认证的更多详细信息,请参见[官方文档](https://www.openldap.org/doc/)。
+
+> 在开始之前,请熟悉[外部认证配置和用户主体](../authentication-config/authentication-config.md#外部认证配置和用户主体)的概念。
+
+## 背景:OpenLDAP 认证流程
+
+1. 当用户尝试使用其 LDAP 凭证登录时,Rancher 会使用具有搜索目录和读取用户/组属性权限的 ServiceAccount,创建与 LDAP 服务器的初始绑定。
+2. 然后,Rancher 使用搜索筛选器根据用户名和配置的属性映射为用户搜索目录。
+3. 找到用户后,将使用用户的 DN 和提供的密码,通过另一个 LDAP 绑定请求对用户进行身份认证。
+4. 认证成功后,Rancher 将基于用户对象的成员属性和配置的用户映射属性执行组搜索,来解析组成员。
+
+## OpenLDAP 服务器配置
+
+你将需要输入地址,端口和协议来连接到 OpenLDAP 服务器。不安全流量的标准端口为 `389`,TLS 流量的标准端口为 `636`。
+
+> **使用 TLS?**
+>
+> 如果 OpenLDAP 服务器使用的证书是自签名的或不是来自认可的证书颁发机构,请确保手头有 PEM 格式的 CA 证书(包含所有中间证书)。你必须在配置期间粘贴此证书,以便 Rancher 能够验证证书链。
+
+如果你不确定要在用户/组`搜索库`字段中输入什么值,请咨询你的 LDAP 管理员,或参见 Active Directory 身份认证文档中的[使用 ldapsearch 确定搜索库和 Schema](../../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md#附录使用-ldapsearch-确定搜索库和-schema) 章节。
+
+OpenLDAP 服务器参数
+
+| 参数 | 描述 |
+| :---------------------- | :----------------------------------------------------------------------------------------------------------------------------------- |
+| 主机名 | 指定 OpenLDAP 服务器的主机名或 IP 地址。 |
+| 端口 | 指定 OpenLDAP 服务器监听连接的端口。未加密的 LDAP 通常使用 389 的标准端口,而 LDAPS 使用 636 端口。 |
+| TLS | 选中此框可启用 SSL/TLS 上的 LDAP(通常称为 LDAPS)。如果服务器使用自签名/企业签名的证书,则还需要粘贴 CA 证书。 |
+| 服务器连接超时 | Rancher 在认为无法访问服务器之前等待的时间(秒)。 |
+| ServiceAccount 标识名称 | 输入用于绑定,搜索和检索 LDAP 条目的用户的标识名称(DN)。 |
+| ServiceAccount 密码 | ServiceAccount 的密码。 |
+| 用户搜索库 | 输入目录树中开始搜索用户对象的节点的标识名称(DN)。所有用户都必须是此基础标识名称的后代。例如,"ou=people,dc=acme,dc=com"。 |
+| 组搜索库 | 如果组位于`用户搜索库`下配置的节点之外的其他节点下,则需要在此处提供标识名称。否则,将此字段留空。例如:"ou=groups,dc=acme,dc=com"。 |
+
+## 用户/组 Schema 配置
+
+如果你的 OpenLDAP 目录不同于标准的 OpenLDAP Schema,则必须完成**自定义 Schema** 部分实现匹配。
+
+请注意,Rancher 使用本节中配置的属性映射来构造搜索筛选器和解析组成员。因此,我们建议你验证此处的配置是否与你在 OpenLDAP 中使用的 Schema 匹配。
+
+如果你不确定 OpenLDAP 服务器中使用的用户/组 Schema,请咨询你的 LDAP 管理员,或参见 Active Directory 身份认证文档中的[使用 ldapsearch 确定搜索库和 Schema](../../../../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-active-directory.md#附录使用-ldapsearch-确定搜索库和-schema) 章节。
+
+### 用户 Schema 配置
+
+下表详细说明了用户 Schema 配置的参数。
+
+用户 Schema 配置参数
+
+| 参数 | 描述 |
+| :---------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Object Class | 域中用于用户对象的对象类别名称。如果定义了此参数,则仅指定对象类别的名称 - *请勿*将其放在 LDAP 包装器中,例如 `&(objectClass=xxxx)`。 |
+| Username Attribute | 用户属性的值适合作为显示名称。 |
+| Login Attribute | 登录属性的值与用户登录 Rancher 时输入的凭证的用户名部分匹配。通常是 `uid`。 |
+| User Member Attribute | 包含用户所属组的标识名称的用户属性。通常是 `memberOf` 或 `isMemberOf`。 |
+| Search Attribute | 当用户输入文本以在用户界面中添加用户或组时,Rancher 会查询 LDAP 服务器,并尝试根据此设置中提供的属性匹配用户。可以通过使用管道(“\|”)符号分隔属性来指定多个属性。 |
+| User Enabled Attribute | 如果 OpenLDAP 服务器的 Schema 支持使用用户属性的值来评估账号是禁用还是关闭,请输入该属性的名称。默认的 OpenLDAP Schema 不支持此功能,因此此字段通常留空。 |
+| Disabled Status Bitmask | 禁用/锁定的用户账号的值。如果 `User Enabled Attribute` 是空的,则忽略此参数。 |
+
+### 组 Schema 配置
+
+下表详细说明了组 Schema 配置的参数。
+
+组 Schema 配置参数
+
+| 参数 | 描述 |
+| :----------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Object Class | 域中用于组条目的对象类别名称。如果定义了此参数,则仅指定对象类别的名称 - *请勿*将其放在 LDAP 包装器中,例如 `&(objectClass=xxxx)`。 |
+| Name Attribute | 名称属性的值适合作为显示名称。 |
+| Group Member User Attribute | **用户属性**的名称。它的格式与 `Group Member Mapping Attribute` 中的组成员匹配。 |
+| Group Member Mapping Attribute | 包含组成员的组属性的名称。 |
+| Search Attribute | 在 UI 中将组添加到集群或项目时,用于构造搜索筛选器的属性。请参见用户 Schema 的 `Search Attribute`。 |
+| Group DN Attribute | 组属性的名称,其格式与用户的组成员属性中的值匹配。参见 `User Member Attribute`。 |
+| Nested Group Membership | 此设置定义 Rancher 是否应解析嵌套组成员身份。仅当你的组织使用这些嵌套成员身份时才使用(即你有包含其他组作为成员的组)。如果你使用 Shibboleth,此选项会被禁用。 |
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
new file mode 100644
index 00000000000..4321f1e6765
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md
@@ -0,0 +1,32 @@
+---
+title: Shibboleth 和 OpenLDAP 的组权限
+---
+
+
+
+
+
+由于 Shibboleth 是 SAML 提供者,因此它不支持搜索用户组的功能。虽然 Shibboleth 集成可以验证用户凭证,但是如果没有其他配置,Shibboleth 不能在 Rancher 中给用户组分配权限。
+
+你可以通过配置 OpenLDAP 来解决这个问题。如果让 Shibboleth 使用 OpenLDAP 后端,你将能够在 Rancher 中搜索组,并从 Rancher UI 将集群、项目或命名空间等资源分配给用户组。
+
+### 名词解释
+
+- **Shibboleth**:用于计算机网络和互联网的单点登录系统。它允许用户仅使用一种身份登录到各种系统。它验证用户凭证,但不单独处理组成员身份。
+- **SAML**:安全声明标记语言(Security Assertion Markup Language),用于在身份提供程序和服务提供商之间交换认证和授权数据的开放标准。
+- **OpenLDAP**:轻型目录访问协议(LDAP)的免费开源实现。它用于管理组织的计算机和用户。OpenLDAP 对 Rancher 用户很有用,因为它支持组。只要组已存在于身份提供程序中,你就可以在 Rancher 中为组分配权限,从而让组访问资源(例如集群,项目或命名空间)。
+- **IdP 或 IDP**:身份提供程序。OpenLDAP 是身份提供程序的一个例子。
+
+### 将 OpenLDAP 组权限添加到 Rancher 资源
+
+下图说明了 OpenLDAP 组的成员如何访问 Rancher 中该组有权访问的资源。
+
+例如,集群所有者可以将 OpenLDAP 组添加到集群,从而让组有权查看大多数集群级别的资源并创建新项目。然后,OpenLDAP 组成员在登录 Rancher 后就可以访问集群。
+
+在这种情况下,OpenLDAP 允许集群所有者在分配权限时搜索组。如果没有 OpenLDAP,将不支持搜索组的功能。
+
+当 OpenLDAP 组的成员登录到 Rancher 时,用户将被重定向到 Shibboleth 并在那里输入用户名和密码。
+
+Shibboleth 会验证用户的凭证,并从 OpenLDAP 检索用户属性,其中包括用户所在的组信息。然后 Shibboleth 将向 Rancher 发送一个包含用户属性的 SAML 断言。Rancher 会使用组数据,以便用户可以访问他所在的组有权访问的所有资源。
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-shibboleth-saml.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
similarity index 69%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-shibboleth-saml.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
index 49a68af19f4..285a5d3e6aa 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/configure-shibboleth-saml.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/configure-shibboleth-saml.md
@@ -2,28 +2,31 @@
title: 配置 Shibboleth (SAML)
---
-如果你的组织使用 Shibboleth Identity Provider (IdP)) 进行用户身份验证,你可以通过配置 Rancher 来允许用户使用 Shibboleth 凭证登录。
+
+
+
-在此配置中,当 Rancher 用户登录时,他们将被重定向到 Shibboleth IdP 来输入凭证。身份验证结束后,他们将被重定向回 Rancher UI。
+如果你的组织使用 Shibboleth Identity Provider (IdP) 进行用户身份认证,你可以通过配置 Rancher 来允许用户使用 Shibboleth 凭证登录。
-如果你将 OpenLDAP 配置为 Shibboleth 的后端,SAML 断言会返回到 Rancher,其中包括用于引用组的用户属性。然后,通过身份验证的用户将能够访问其所在的组有权访问的 Rancher 资源。
+在此配置中,当 Rancher 用户登录时,他们将被重定向到 Shibboleth IdP 来输入凭证。认证结束后,他们将被重定向回 Rancher UI。
-> 本节假定你已了解 Rancher,Shibboleth 和 OpenLDAP 是如何协同工作的。有关工作原理的详细说明,请参见[本页](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-shibboleth-saml/about-group-permissions.md)。
+如果你将 OpenLDAP 配置为 Shibboleth 的后端,SAML 断言会返回到 Rancher,其中包括用于引用组的用户属性。然后,通过认证的用户将能够访问其所在的组有权访问的 Rancher 资源。
+> 本节假定你已了解 Rancher、Shibboleth 和 OpenLDAP 是如何协同工作的。有关工作原理的详细说明,请参见[本页](about-group-permissions.md)。
-## 在 Rancher 中设置 Shibboleth
+# 在 Rancher 中设置 Shibboleth
### Shibboleth 先决条件
->
+
> - 你必须配置了 Shibboleth IdP 服务器。
> - 以下是 Rancher Service Provider 配置所需的 URL:
-> 元数据 URL:`https:///v1-saml/shibboleth/saml/metadata`
-> 断言使用者服务 (ACS) URL:`https:///v1-saml/shibboleth/saml/acs`
+> 元数据 URL:`https:///v1-saml/shibboleth/saml/metadata`
+> 断言使用者服务 (ACS) URL:`https:///v1-saml/shibboleth/saml/acs`
> - 从 IdP 服务器导出 `metadata.xml` 文件。详情请参见 [Shibboleth 文档](https://wiki.shibboleth.net/confluence/display/SP3/Home)。
### 在 Rancher 中配置 Shibboleth
-如果你的组织使用 Shibboleth 进行用户身份验证,你可以通过配置 Rancher 来允许你的用户使用 IdP 凭证登录。
+如果你的组织使用 Shibboleth 进行用户身份认证,你可以通过配置 Rancher 来允许你的用户使用 IdP 凭证登录。
1. 在左上角,单击 **☰ > 用户 & 认证**。
1. 在左侧导航栏,单击**认证**。
@@ -47,12 +50,12 @@ title: 配置 Shibboleth (SAML)
```
openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com"
```
- 1. **IDP 元数据**:从 IdP 服务器导出的 `metadata.xml` 文件。
+ 1. **IDP 元数据**:从 IdP 服务器导出的 `metadata.xml` 文件。
1. 完成**配置 Shibboleth 账号**表单后,单击**启用**。
- Rancher 会将你重定向到 IdP 登录页面。输入使用 Shibboleth IdP 进行身份验证的凭证,来验证你的 Rancher Shibboleth 配置。
+ Rancher 会将你重定向到 IdP 登录页面。输入使用 Shibboleth IdP 的用户凭证,来验证你的 Rancher Shibboleth 配置。
:::note
@@ -71,11 +74,11 @@ SAML 协议不支持用户或用户组的搜索或查找。因此,如果你没
- 添加组时,必须从文本框旁边的下拉列表中选择组。Rancher 假定来自文本框的任何输入都是用户。
- 用户组下拉列表仅显示你所属的用户组。如果你不是某个组的成员,你将无法添加该组。
-要在 Rancher 中分配权限时启用搜索组,你需要为支持组的 SAML 身份验证提供商配置后端(例如 OpenLDAP)。
+要在 Rancher 中分配权限时启用搜索组,你需要为 SAML 身份认证服务配置支持组的后端(例如 OpenLDAP)。
-## 在 Rancher 中设置 OpenLDAP
+# 在 Rancher 中设置 OpenLDAP
-如果你将 OpenLDAP 配置为 Shibboleth 的后端,SAML 断言会返回到 Rancher,其中包括用于引用组的用户属性。然后,通过身份验证的用户将能够访问其所在的组有权访问的 Rancher 资源。
+如果你将 OpenLDAP 配置为 Shibboleth 的后端,SAML 断言会返回到 Rancher,其中包括用于引用组的用户属性。然后,通过认证的用户将能够访问其所在的组有权访问的 Rancher 资源。
### OpenLDAP 先决条件
@@ -87,16 +90,15 @@ SAML 协议不支持用户或用户组的搜索或查找。因此,如果你没
### 在 Rancher 中配置 OpenLDAP
-配置 OpenLDAP 服务器,组和用户的设置。有关填写每个字段的帮助,请参见[配置参考](../how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/configure-openldap/openldap-config-reference.md)。请注意,嵌套组成员资格不适用于 Shibboleth。
+配置 OpenLDAP 服务器,组和用户的设置。有关填写每个字段的帮助,请参见[配置参考](../configure-openldap/openldap-config-reference.md)。请注意,嵌套组成员资格不适用于 Shibboleth。
-> 在开始之前,请熟悉[外部身份验证配置和主体用户](authentication-config.md#外部身份验证配置和用户主体)的概念。
+> 在开始之前,请熟悉[外部认证配置和用户主体](../authentication-config/authentication-config.md#外部认证配置和用户主体)的概念。
1. 使用初始的本地 `admin` 账号登录到 Rancher UI。
1. 在左上角,单击 **☰ > 用户 & 认证**。
1. 在左侧导航栏,单击**认证**。
-1. 单击 **Shibboleth**,如果已配置 SAML,则单击**编辑配置**。
-1. 在**用户和组搜索**下,选中**配置 OpenLDAP Server**。
+1. 单击 **OpenLDAP**。将显示**配置 OpenLDAP 服务器**表单。
## 故障排除
-如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../faq/technical-items.md#如何启用调试日志记录)。
+如果在测试与 OpenLDAP 服务器的连接时遇到问题,请首先仔细检查为 ServiceAccount 输入的凭证以及搜索库配置。你还可以检查 Rancher 日志来查明问题的原因。调试日志可能包含有关错误的更详细信息。详情请参见[如何启用调试日志](../../../../faq/technical-items.md#how-can-i-enable-debug-logging)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
new file mode 100644
index 00000000000..1136fdabc91
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md
@@ -0,0 +1,78 @@
+---
+title: Pod 安全策略
+---
+
+:::caution
+Pod 安全策略仅在 Kubernetes v1.24 之前可用。[Pod 安全标准](pod-security-standards.md) 是内置的替代方案。
+:::
+
+[Pod 安全策略(PSP)](https://kubernetes.io/docs/concepts/security/pod-security-policy/)是用来控制安全敏感相关 Pod 规范(例如 root 特权)的对象。
+
+如果某个 Pod 不满足 PSP 指定的条件,Kubernetes 将不允许它启动,Rancher 中将显示错误消息 `Pod is forbidden: unable to validate...`。
+
+
+## PSP 工作原理
+
+你可以在集群或项目级别分配 PSP。
+
+PSP 通过继承的方式工作:
+
+- 默认情况下,分配给集群的 PSP 由其项目以及添加到这些项目的任何命名空间继承。
+- **例外**:无论 PSP 是分配给集群还是项目,未分配给项目的命名空间不会继承 PSP。因为这些命名空间没有 PSP,所以这些命名空间的工作负载 deployment 将失败,这是 Kubernetes 的默认行为。
+- 你可以通过将不同的 PSP 直接分配给项目来覆盖默认 PSP。
+
+在分配 PSP 之前已经在集群或项目中运行的任何工作负载如果符合 PSP,则不会被检查。你需要克隆或升级工作负载以查看它们是否通过 PSP。
+
+在 [Kubernetes 文档](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)中阅读有关 Pod 安全策略的更多信息。
+
+## 默认 PSP
+
+Rancher 内置了三个默认 Pod 安全策略 (PSP),分别是 `restricted-noroot`(受限 noroot),`restricted`(受限)和 `unrestricted`(不受限)策略。
+
+### 受限-NoRoot
+
+此策略基于 Kubernetes [示例受限策略](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml)。它极大地限制了可以将哪些类型的 Pod 部署到集群或项目中。这项策略:
+
+- 阻止 Pod 以特权用户身份运行,并防止特权升级。
+- 验证服务器所需的安全机制是否到位,例如限制哪些卷只能挂载到核心卷类型,并防止添加 root 补充组。
+
+### 受限
+
+该策略是宽松版的 `restricted-noroot` 策略,除了允许以特权用户身份运行容器外,几乎所有限制都到位。
+
+### 不受限
+
+该策略等效于在禁用 PSP 控制器的情况下运行 Kubernetes。对于可以将哪些 Pod 部署到集群或项目中,它没有任何限制。
+
+:::note 重要提示:
+
+禁用 PSP 时,默认 PSP **不会**自动从集群中删除。如果不再需要它们,你必须手动删除它们。
+
+:::
+
+## 创建 PSP
+
+使用 Rancher,你可以使用我们的 GUI 创建 Pod 安全策略,而不是创建 YAML 文件。
+
+### 要求
+
+Rancher 只能为[使用 RKE 启动的集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)分配 PSP。
+
+你必须先在集群级别启用 PSP,然后才能将它们分配给项目。这可以通过[编辑集群](../../../pages-for-subheaders/cluster-configuration.md)来配置。
+
+最好的做法是在集群级别设置 PSP。
+
+我们建议在集群和项目创建期间添加 PSP,而不是将其添加到现有的项目或集群中。
+
+### 在 Rancher UI 中创建 PSP
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在左侧导航栏中,单击 **Pod 安全策略**。
+1. 单击**添加策略**。
+1. 为策略命名。
+1. 填写表格的每个部分。请参阅 [Kubernetes 文档](https://kubernetes.io/docs/concepts/policy/pod-security-policy/),了解每个策略的作用。
+1. 单击**创建**。
+
+## 配置
+
+关于 PSP 的 Kubernetes 文档,请参阅[这里](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md
new file mode 100644
index 00000000000..59bd02daf07
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/custom-branding.md
@@ -0,0 +1,192 @@
+---
+title: 定制品牌
+---
+
+## 先决条件
+
+你至少需要拥有集群成员权限。
+
+## 品牌配置
+
+要配置品牌设置:
+
+1. 单击 **☰ > 全局设置**。
+2. 单击**公司品牌**。
+
+### 自有品牌公司名称
+
+此选项将大多数出现的 “Rancher” 替换为你提供的值。名称中需要包含 Rancher 的文件,例如 `rancher-compose.yml`,不会被更改。
+
+### 支持链接
+
+使用自定义 URL 替换`提交问题`链接,而不是将用户引导至 GitHub Issue 页面。还可以选择显示 Rancher 社区支持链接。
+
+### Logo
+
+上传深色和浅色的 Logo 来替换顶层导航标题中的 Rancher logo。
+
+### 主颜色
+
+使用自定义颜色替换整个 UI 中使用的主颜色。
+
+### 固定横幅
+
+在页眉、页脚或两者中显示自定义固定横幅。
+
+要配置横幅:
+
+1. 单击 **☰ > 全局设置**。
+2. 单击**横幅**。
+
+## 自定义导航链接
+
+在本节中,你将了解如何配置**集群仪表板**左侧导航栏中的链接。要访问集群仪表板:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到你想要自定义导航链接的集群,然后单击 **Explore**。
+
+添加链接可以让你快速访问安装在集群上的服务。例如,你可以为安装了 Istio 的集群添加指向 Kiali UI 的链接,或者为安装了 Rancher Monitoring 的集群添加指向 Grafana UI 的链接。
+
+自定义链接不会影响用户对各项服务的访问权限。
+
+可以在顶层创建链接,并且可以将多个链接组合在一起。
+
+### 添加自定义导航链接
+
+:::note 先决条件:
+
+你至少需要拥有集群成员或项目成员权限。
+
+:::
+
+1. 单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到要添加自定义导航链接的集群,然后单击 **Explore**。
+1. 在顶部导航菜单中,单击 **🔍(资源搜索)**。
+1. 输入 **Nav** 并单击 **Nav Links**。
+1. 单击**使用 YAML 文件创建**。
+1. 创建导航链接的最简单方法是添加以下字段:
+
+    name: linkname
+    toURL: https://example.com
+
+    有关设置链接(包括可选字段)的更多详细信息,请参阅[链接配置](#链接配置)。
+1. 单击**创建**。
+
+## 链接配置
+
+### `name`
+
+链接的显示名称。必填项。
+
+### `group`
+
+单击时展开的一组链接的名称。
+
+可选项。如果未提供,则显示为独立链接。
+
+组与独立链接分开显示,如下所示:
+
+
+
+### `iconSrc`
+
+Base64 格式的 Logo 图标源。
+
+以下是 Base64 格式的 Grafana Logo 示例:
+
+```
+data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAgAElEQVR4Aey9d5xkZZn3zb/P+3mffZ9nDcDAzHTuqs49PZEhCBhBJc10nO7pHKbD9PR07p5AWlEEZM2ioph3dXGNqLgCxhVBRIYRmNy5cjqnTlWdqu/7ue5zTk+DiNIsCn4suOacrjrxvq/fle/7PofX+ycDZNT/pIAUaUUmaRBKp8FMQ1L9qL6S4+VYUikIeuBrn+LppiuJNG/BvHYd7HbhbXLBZ/dB8AkwFokCHsAH6Kb8IxTHJIyBHwgDMTBT6h7yWAmb1L3sxyEjfxkYxDDQMa0nWV0vyE2slwZ5JtUO8v5JUhgk0EkRw5TnUg9sgJm03lsezkyTIU0C2VrNJU2WsdtTPVQyAmYU4mGIhDESaULAcSG5RjwJEQ8YsxgEOZoOcSxjvXsyKM8nL24QJ4UmF5TnlL7IWLure/G/3lnn/PVu9SrdaQVApO3/CCAZAYjNSLIVBrD/VMwSnsf4/B38ofWthFu3krhhPZmaLJZuyEY7vJPIV++AwEnImJwJ66qTFcMLSswkGWKkiKqtMIICwB890zL/2lwoHGLYnCIMtMqP3Md5N7mMDRDTBkhKAURAqNBs3TOdspjWERgrmkYuJbQMEPUeKdAEIBrIuSIKEiZ+B0ShADz7W/Tjv8TMLOIjybxcJwNJOUgAkjHQSFoAkedMWn2g7rXKV/9rnfZ3DRCRjgoN0ik2A0nDLgMkbYD3aU7dPcRT7ZfhadmEVldA/IZ1RN6TRahxO8f37CTyxU/B4pySvOlYHNM0sZhQRKgwTRrRWALOZc0lWksoY30n0lPkuMBCPYCcKn/Ic632Iy/ivNvy+4k2EOAamDbJ/rIKFYEhx74AWPIWcpRhv8dyu5mQTEEkmUYTjSsaMBWDiA9mjsF/foWTH76Z33zqEL6jD5IgRkTeR64valfOQQSL6My0Ap/c32qvlNJgq339v8Z5fzcAkcayBKrFkNIBCiDOD/Lj8jG2iZWOwvGHefxQNY+3beHk7grCDS7067JhRwn6dZX4d72L3zVei/6Vz0HYB/EQJINKawRTQXVP+UfYQEjdRphITBlTNIrFqBopIsqcsZnaQYww0iv5yA1XkuJ+eRJREStJvrMFhnO8A5S09ewCDoMkCQUw67KxOMQMCBkpkok4JIIQnYGf3k/s7mlO7N3Fw7VX8J2Ot3Pi/rvBXMLMJKxbx+UR5L4CEJ0IKYUbeV0xAUXDZVSrvZIGeHXPff0DRDGE9PRZPhGec8jhBWcr30uXCWEG4Xf/wW+H38ajXVUcaS7GX+dGvyYX6jeQvL6MZO1lzFx7Mc81XkPyM3eC/xlIz5LJzBIz/bbUtUyKZaksUtPUIS06wzK/LGluM6jwrVg9wkCvECDOe51lE2kL5w2drdU+Ths5bSBbMacsVMtGtKDFug5+5Q00Iw2JFOhhmD0C3/goS6M1HKvfiqfxMo7t3MLv2i7F97nDoJ+BpG45IXLysgYRgIhvJPeRu4QVibZ7LX/+rgDiNLTT58IADjM4rPI8HyXpgYc+yXODF3G0ZyPPtZSxUFeM/9p8MrUVJK4rIX5NMfEdm1jauZ1j7Vfj/exNcPoRSB2HxIICiHI+Hb4U00mYSWkP4RAhHTKiUexggfCEPJDiUOepX/5W3tN5R9m3PpZZJ6bdWbJ+kWPkto51JyaVxbBnpYtcT35XwFDXt8Ee8cJ//wj9X6c40fF2TtZU4qspJV5bidawCU/HxWgfHoTwccuhl4srE0saRnwQwwaIZQKa+BH6B0CcfnvVttIBK8jmFId5zjKEmA+WhJTIkeoYYxY+t5/FtmJ8zeUEWzdypqmM07VFhGpK0W9wYVyXh15dQLxnG/NdF/NE00V4PzgAv/0OLB5RbyWKQD2CML5wV1KMbIlmCSgkapQEkcLCNPJ72mJAsdXl+Vb7cRh+mcnlQvKl0IomUfs2m
OT28rwCaiErgiW+hXWOaBSnzRSw4/Mw/wR87zN4xht55vqNzNxQQXj3VoyWzRjX5ZPcWUigrozozU0QeMbSNAnIyA0EcaQRE1N8EPWn0hoSDxSSRnntfl73GkTMAsvXsDnCYZAXMERc2dei2i0HVnWMdpro4etYuv58orUujLatLLZsZKapkqXaYqINRZi7XWQ63ASacwh2lhPtvZwjdVs4M94ETz4M8ajFjI5TLlsJLavwsu0GCA84JpX4uEAAVHBYGHa1H3lVuZaQxXgvAIh86QDFDqu+ECDSIstS3AGWnCdmUnwRjv4Y/XMHON51OSerSwjv2kCqdRta81ZiDZWwqwjq8onWFKIfrIPQs5CIKl/ekJvZDyagFJJbWKFuiQFLePwfJtZq+//PnieNLc64lUuwTYoXMITwZowMgbSu3EIjE8NMiKMdhmd/zlzrRjJ12UQb3IQaKojUbiRUW8VSQynzjQV4mtfjbz6fSNN5hBvXsrjbzXzTZjz1V/Bs0/Xw0A9g7qRy3E3DRzARUJpKni0ZSljpEUkcCEBsQR3BYIEIC2mxw+WBV/dx3v950TP5UshpBxskqURG+cvCjtImwqyyDYZ9pDPiMKfR4hHQY5aJdPIIPPg1jrS/nZndW/E0lRJodBHY5SbYUEq4biOx2goi16+D1iLCO/PwDL0HfvdD5X9JFNwXP+vjyL2UMJDnUs8kRpzkjv4BkNX1/l9wlmiOhHL4RIbaDrA0vs0UwifSMVEkuSWJsyTxRACMIKSi8Nj3WWyphLr16PWFaPVlGDs3ou2swldXpgCy0LoWT+t5RJreqEASaMpRDGLs2E6w+u2c7mkgcM/t8IdHID2PSZAQUaJmSrkAypgXXrClaTIj5kZcRXXiKlH4ygAibXA2Yme3wUqAJDMWWDJgGEmiWgzDFL1hCRcjHkWP+kgaPkgHQUyqIz8l+fHDzLa/i0DdVrTaUmL1LsINBTZIignXVRCpK8W3cx3Jdjehnfl4970bHnsA0rpi/QWxLqUf7SiZ2pd/BBPio0kQQyVO/4LO/hsd8ro2sYQxRPYJSJSZJYwhHSBkWoJUABJRqTLRM0m08KKlPYwgiR99kZOdlRi71pOuKSS90wJIrLqK2M4yZTaEGy0N4ml9M96W89Hqs0lVu0nt2Ii+YztPv8XF8ZZ3EPrkAfj9dyF50gaJhscwEP5U0twGrZlKkEhKwDNkZ7gV27yC7ndUxYtfwkymMZMp5L7KF0pJNj2OmYiR0MMkRWsIoyY8MPsYia/fyZmxGzjdsInAjjKM6nIS1aUKJOH6UgK7LAo2FBPc5WKh9kJiYoJWF7I08G5SP7sfMrryLuZsYCjZJYlFp39EWKhIn4TBVy8gXvyN/2e/fd0DREoY4sqvsEo8lHQSCSV8Z5c8RDJJxAeRUKvmm4FEGDQ/S1+6k2NdG4g1ZWPWFJLZWUZi5wbiOypJXF9GfIcbrT6HYNNaPC0X4G2+AF0BxEVqZ7k6NvRuN96dGzjR+VZOva+LyMNfgOizYrwQJkIkY1qmhcMcSZGccTJmCF0XJ/UVAESuuRytOssY8rWjSGVf12MYsbDKyYh0z4SXyIQWIB60hIU2D7/6FjN37uVo56XMNpQSqXdj1LpIVBdj1JSi1WwgUrtBaQ4xscTc8u7O40z9hYTa3UR2uvD2vpPIA/dBJooXmLOfQwkIx7SS13X6RwIY/wDI2Y77n94TDWKVMCQtP0Q6QRpfJJRsJXyfgWhaoCFaJk5SACKM4ZnlxIemOd2+mWhTnvJB4nUlmDsqMK8vVwDRdxYr0yLSkEekIYfwrhyCjXkqmajVFlvMc0M++k43vh2lnKzfxqmpRrRvfRwWHlPVW7G0D40YyaSBGRVJLV66xSWmZPIV96yyZRQSnIy9JRDkK7m6NIES1CIYMgkykv0W3yvug+giaB7LnDrxK1LfvYeT040cqdvEUm0JdFRhtpbhr84mXF9MuL4crWYjevVG9JoKYhIKbypgs
TmH0w3rCLcWkbjBTbD9SrxfuxvSfnyYLDmvpQIXKzSpoFceVJlX/9AgTjP9j2/FsYyRVKQcdWlr4QrHIzQhYUIsbSq7Py2Z8/CC0h4cO8rRw3uZb9mKvstFZJdoixISO8tJ3lCuTKxItUjSYiVNtTqXAotyVBtd6ju91gWNLpI7skheV4BevYHFhks4PnA9S/feQup3D4B+0o7WxIjrYUxdEogWs4j584o+DkCk3kku9YJIlTjikZROUsIGKfEvvJDyg+EB3wkSj3yD2dt7OTnwdk7UbySwq4pE0wYiNYUEavOJNJfiF5OqoVw55HpNOUICEKVBmgo4U7+OsBR2Xu8itvsyFu65GVIegsRUpkN1hiRNRVvY7+3UellZ9FfYBq+oAf/8ya9rE0sAItlZAclLAUTLpIkmJfQoUZoASNLrsV/x9Gg3vqaLSNcWkagtQq8rUY6nxPT9tWXKUQ/WVygJKlJUnFIHMFKSEqvPg+4K0rU5pG7IgdpSkjdUMn/NBmbarmLu1n7MR/4NPE8qkMTQCKSlhtcSnlIoqyTpn++nFz9CACKaUsiWyqI9RD5ICkLyDhF0YmYYQ1+CmJhVi7BwlNPf+zK/PtBGtPsSwlefy+w7z1P+RazrYhYbN3K6upTFunIFDvE35H0jDQVKMKg2qCtV2mWxJksFOHivi2TdxczdNQqJWaKZABKtIxO2KhZsh1we09JuVtWxFaJ/8dd7LXz7ugaINLb4FpZ/YYd5RZ07XJK0SoESaZOY5icj9dcSvZLy7Ie/y+mBBsINW0nXlJKsKVEAEfva11CCTyI0dWUogNRV2FEbkZ6lSoKKjR6pd5FoLMLcVUSqzkXyhgL09+ShS/a9fjvB7qs5PlJP8vufhugzKjEmxp4wsDyiVIorS8PGibP/oowhP4oIdrbOvi2V5VfHtLJyDhLhk5yPbmmN2Ax4j5L61bc58pGDPDTcyFNd78SoLoTq9dBUjL67grn6ck5Ul3GmpgJPQxXB+nLCDW4VwXLMSzG7YrVWW/h2ZqHX5cF1hcTrt+G5bS/EThNP+YjKmwowklEwJWJllZo4Cl4EnHqdF33h18aXr2uAqCa0TQtpaKvCVuobjGVTS3zAuCHJvCCYAZICEM2DcecowV0X2aZDKbo4ohLOFC2xTGJarKA66xjruGIideUqbyI5AUvTyPluxBwzatzqev66TXj73g2fvRWOPQpxj2KaU1qIZ2NhO5ttMbcw+LJ5btsjmYyE44QEHBKIcLZytDU2Q5KOkmqRrELELsBPZOKQDkPKC8YMnPkl+r/fydHhnfy+YRvzjdtJtl+GUVNMosaFVudW7y2ACDRUqPdR4Kh3tKZoEREK8t5WG4mppTfkE63OVu8c7ryMk/vq4fhvVeInYJorhJVIAyvULhUN8p6veXQAr2+ACCrEvLATcAlV8xNUA4WUHo+DRDWTCRnLEADTgxxDeAbz5naSNWXLwBCmf7kkibJw7VYCddvwN2zE21iKv9GlnHkV7arJg8ZKaLmUeP8NxO+cgF9+T4VUpZJ1MR1TCbuzDvXZ6FPaCYvKOzpaQrbCWYoEKOJbpfASZyEVYTERJGRKUFukdRik1mzpaXjk6yzePcLxve9ltmkbkboK0tUlZKrd6LVnGd5h/L90K8IgUZdPpCbb8km6L2W2fycc+ZXKhQTkOaV/RGWIv5XRVd9IxbAChy3cXhu64sWf4u8OIHGkFF23OiYOetQkrQoHxbySMYE6zD5FdLzOju8XK5NJpOHLJqVdJPy5Udnq3sZiPLsL8O7OIdyYrULEyVo3qcbNeOsv4Wjj2zl2yyCZn/4nhE6qkXppCSRImZZpkRPkUmkLqWdaaTeJ8xJNk4mYZMIp0mHNMhkTS6rM3Kpt8oG5CN4j8Mwj+L54JzOHezjW/HbmqrcSq5YQtovUNdkY165Hr7W1ggpGSEDiLycBiFGbpwAigsHTtZ2Z/hvg1z9WWjKsRk39AyAvDr2/xrciXYWBVmiQu
CqjtgFiQDRkmSLL5obk1Z98mKW+d2PsLEHCtS8bGDaYRONIIk2rLVcOqwzTXWjJY6ElB//ubKVJQjuyiVaLlN7CfMOlHG27iuMHOkh842Nw9BcQ84IeAc2maBhiMuYiCJEgEm3j2afh6FNw5En4/eOkn3iUzOO/hsd+Bk88BI//CJ78L3juF3Div+G3P8T4z3vwfewQj7VdzTONV6roWrLxIlXGn7khH65bR2ZHNhKJezmgWHmsACReY2kQ365Cljq3MrPnGvjJf6j8SlTU4PM0iDUGRHyjf2iQvyZARHur6Ig4wFGVEFQdk4BYWPS8HCCOokhXP+Eff4XZjiuIVxe9IoCIeZLaKTa85bB7mwqYb81jti1HgUQ0iUR+gtflEb2uGKPhIoJNlzHbdDmefTtI3L4HfnUf/Pwe+OHd8B+3wudGMT7cQ/C23XhursV7Yw3ew9V4DlzP4tS1LIy/h7nRq5nd/w7m9l1FaLgeT88OllquI9xRjdm/G/qbibVdz2LNFczVXsJ8/RYWaivw1rgxGlywuwh25UN1tkoGrmT6l7MvANGr84hU5+JpzGexfSMz3e+Cb34GdC9xGaIrALESMsrekuG/UtHwD4D8DQAiDS9DPsVZd0I6CTFLRJJJNEUAEjnJwlc+yJmWi/4HAOLG3OkiVS0h3wKVPJtvKWCmLQ/Zirnlr8kjurOA5A1ukteXkthZgV5bRbhhM77GSuYb3cwJsHa7WGh2sdTsxtPqwt/qVhRocRNsLiDYlE+gMQ9/Qw6++vV469bhq87FaN5GtHoLwasriF1VBddeDDVXQt1boe5StOrNxHZVqfox784cwrXryezOh0YpmblQDTF+OaBYeawARNuZvwyQhfYK5trfCl+4EyLzmKrU3/ZDRE5lZEShjLBcAZC/Bp+8gnu8/n0Q24GV9heAWMM4ZRyG3TGSaLYdRFI+mP8tpz48ohKEEmmSTrZCti9/K5EfAUequkAlEcUOX2h2M9viZr65GE9TMUvX55KQ7HRtKZnrCkm8NwdzRyHpWpcybxJdm9G6thDr3EqkfSPR1irCzZWEdlcQ3F2Kt86Ft64An02B2gKVxJNEnqIGF776QjUSMlBfonI2oZoygjfIwK9CNfhL8jvplgqSzUWEG7Lw1a5hqfZNeOrWqPCtFcaVUO7LI2m72I48wjtzWGrMYb6tlPnWizE/cRiC4mNJlMQ2g6WfVFhXQs82QETtv8Y/r2+ASOPakRALICKh7LCW9YX6XUq9JcRI0gfHH+H0+9rxNm9RodhXChAJkUrNkoAssEtAUcrC7nI8jRX4d1UQra9QtUzmDS7YUQA78mBnNunq9aTq85h/73pmr8ln/rpCFm9w4a0uIVgrSbhyIg1lBGqKCFbLAC6poC2y/SUr6y8l+r7GXPxt+QQ6ixR5W4tZairEU5uPrzqPeEOxer5kvYtUc5Eqq5mvXcOJujczs+tCAo0FLxsYDpCk7aLXWwBZbMpisa2IxeatpP51BHzPWZM7SD8ICRgUIOz+Wf77tY2Q1z9A7Ia2+kEST45TbneMjHKNZ0gbGsTm4dff4Ll9V+HdVUFkR54aKBWuc7EakkFWMaGaIsK1JWpUnb+uEl99FYHaKjWuRKJGcSlf2VGiRt4ZNdnEa9cRq19DtH6dun+0RiqHy4jWlqhtrKYEi4rUtbWaIixyEa92Ea8pVNtoXT6epizmWtcx15JlUy4Lu3PxNeYTqi8kVluIVmNRTLROg5yTy1xLLvPNuYhzHWhwrYqkzVKitXbmEel2M9ucQ6BlI6cHr4VnfmbNpyUdY/uIFhRWRFVe29hQT/d3ARDBiAMQ9VYKNHZ9kgKIOIoJ0Bbgh59kpu9ygo1lRHfmvyKAWKCywBGqKyNUW6lAEarZpPyC2M5NaIoq0apLFLOG6yVnsA7f7jX4G9cpYMZqyrBIgCH7Jep4BYrqEhscAhKXTWcB4m/MwrN7Hb6mdch+qEEKDHMRMGi1+QpMAhABijyvg
MG3y4WnsVCRf5XgkOvI9TKNlerawW4XpxsvVLVbZ/a8Ax7/nnLUpWOk9spRIhZabCH2D4C8+i3gaGoLILYaV2aX3QkSaZTaCylBic3BFw7i6d5MrEkYNl+Vi0id0epIRtcVWyPspCSjbgOxmo3Eajaj79yKXr3ZcpKlTFyNpXCrEvHFlmzm29YiWzlfEncWSb2X7MvWrv1a3p7NaIs5p0LT9S60+jwStdnKbEPqwa4VyoNrC1QBYXqHW5mSco5VmWuFtSUCJyFu+V7GdayG5Fx2b1ZmnL87n5O7zlNaa777Cvjp1yC2ZJm4diBL+mi5ImA5+/nq88grucPrXoM42kPgIPtn292ydSURJ9l09aPvGJHbWwm3lxFvLlYRHLHjpZJ3NRRqLCLYWKK0kfgL4m9odVUqShWv2YReu4loXZUayhtoKsHb7GapJZ+FtlwW2rNYaslV58t50Xo53yJ1rYYSIg0lRO1tZFcRFp191uguqX/KJ1mXT6Y6H3bkg+Q4ri+EGwphhxuzxi7ErC9S1crisCdqyzCry1TwQMbdhxrdqyI5l8aNGLtK8HbmcqrxXPRdBYR6r4AHP2dVDcuQAztXKH1k9Y/s2QLslXDvX+Hcvz+ALKsSVd+gZsuURLoCyPHH8Ey8B6PFhdkiYz0KiO0qItq4Ogo3FRFqKlYUbSwmtqsUvaGUeH05upSIN5QTbiwluLsYX0sRvhYXntYCfK15ioLNecQaXarcXkruHbK+KyDWWIDeKMesoKZcYjbJOJbw7gKCzS517aVWF/PtFkmoWL6X90vWF6mK5UxNMUIpSXDWW88qzx3aXbQqiu0qJlVTqTSIpyub2eY3k2krIrnvbfDtj1rjTaQWzh4DvwwQmfFFjc957YPkdQ0QR3sIJqSplQax2l6VlMiUm6rOT6KKaQj88gGWBq+E1nwyLUVEanPQmtzEmopXRZHdxUR2uxXJdYTijdY21uQm3Owm0FqIt80if2uh+jvcXIjWlE+8KR+tSfbPUmx3IbHd+RYJgGyKNOcRacl5PjXnWfdoKWapvZjZzmJOdbs52eNmpsvNggyFbZH3c5PY5casd5NqKCbRUEx8V4kyM+Udws2rIzFT9etLlgGy0HYedBeT2Xcl+hduBn1WFVeenWLIcRb/AZC/gvKzAOGAYxkgdhRRaq5UqbeNIhFaz/3nF/ANXKoAQoubUHWWYh6HyVezFYYWZtd259lkMXGoNQd/Ww6ejhyWOmWbR6A1TzF+QrLYdYVkJMrU5CYkz/I8soAk4LIoX4Vy/W15rKRQSz7xxmJl4lgMbwFWQCFg9LQXstRhkewLUOU3Aa7zrrK/WlJ+3LVFGA1leLuzWGw/HzpdJHsu4rRUCcwfeXGAqBGVMsJTek0lSP4q/LKam7yGNYg03AtImH0l2X9KM4udq5xASzjZVq8M2LF/MBIcue9u/AOXQ0chtOYS3HkuWnMBEdEmq6Boi4toSwFaS766jlxL9iOt+QTb8vF15CKmh5C3M5tgezZ6cy5mYz7UCxWo84JtBTyfXATank++dhe+9gLObgsIt7owmmQ8SolNRSTFZNttPVegvQBvRwGLXfksdMlWzrfuFWktUM8uzy/7kRbXi9AL26WESMsKai7FU+0i1FaqAOJrPRfackk2l3F85Fr43Q9UTZb0i5hZ0keqS6WPVg7DVUEV6UyJBzsduAI49kzyZ7veCuf/NQD2NweIemm7ASwbyW5FaShFYh8JOWMhbFTY2DH0FKFYnEBaJmawf9OTEI1DPIVKEkpnhDw8dfdBwmPXIqFW/7X/C63h/6J3FBDrKH4F5LbPLUVrq0Br20CsfQORjgrCnaUEu9wEevII9uQQ6c5C78wi1ZYDLXkgJlPTeQR3WxRqXkOweR3B3VmEmnMJiXllP1u0qwShSFcZEdnvLkXvKiXVWUK6vRizrYhUq4u4Ddjobjk/l+DuHIIt2YRa8wi35RNtK0BvdxHvcBPvLCTTX0Zqj7UfFaEhx
yvKJbS7AL2t1KZy9LYK9NYNyxTs3MCZvnJO9RcR6F6P2boGGtbArlx8HZt5Yu+74blHIHpazSujy8yWUuErGJCpkJYk9C6DWVIQS9jDcmWAlcxbJgdYCRQVJl4xQbhUTJiq5k7mPZaLvXqfvz1AnBj5WfFgN4wARIAhWmAFQBylImLJljwpE8JpmXTZBoiIKl2mNMkQlepYmazg+GMcOdSJv/cdBGtz0Rr/mVjLG9A684h0Fq+Kwp3FBLtKCXQJEEoJd5YT6Sgn1lFqkwBPAOQm1ulC63Ap5jTaC0i0uZBttD2HyDIJE+cqRhZmDreKlrA0RailUDndgd2FquZLEnzeXXnonUXqurF2l2J+0QYWMCxm1zrc6nfZxtoLcY6LthUSEQ3TkoO3JQtfS5YCUrglj1hrAUabm1R7MYlWtyKjrRi9vRit3Xq3SEcp/p5iTvWXcKbfRbgzF7NlPTQK5aK1lnFqz0Wc/FAXxq+/BimZ4ySKaWhktKQFDBlyvDLEJdEUGXkY91ujEJVQtPIoAgM51NIvUq4SVmR98/cMEOHpleCQ/WU9LA6FTUqb2ClZOcYGhxxtJFLEjJTS2nYLKmUkQzp1meTTnIOf/RvHB65Fb96OUZ2P2VeAv20Nka4CIp3uVZGAwtu9AU9PBd49bvx7cgj3XECs+3z0rgswOrIsBmupING8WRUW6i1b0No2Kk0T7thAcM8lBHovw997Ef6erfh6qvB1leHvKsLfUUi8v1xRYk8ZiT0VJLorMbo2kOisIt6xAb13I9G+DUR6qwjvqSDUU0mop5xgdwXB7jKWWouQ6Ja3vQRfRzGBrnL1e3jPBoL9Vcz3livy9pap4/WOEhKtxdaUq40F0JxHujWHVJul/cI96/H3rmexfy3evvWEOwrR2oqItZURtSncXkKoo4iFnjKe6d3MiffVE/7OR+HMb0Am7ktrpE2ZsClD1ATNnsPMshIMezoima9LhuxafS7yUMAhW+vj8IYwwtH2mK0AACAASURBVKv3+ZtrkBf6FC8KDtU0KxpiBaDiCQMhmR9agUa2KUvaxJUenwf9d6S+cjNL3ZdD6zaodcPQRubb1qsSiWhXEashMXVC3WUEe0oI7ikk2JtFqG8N0T3nE+9eQ7IzC7O1ALO5HHP3ZhK7t6G1bCfcth1/5za8ndsI9V9OsO8tCij+nm0Eujfh79qAv70Uf0cx3uZ8fM35BHbnK5NHImDK92kW/8ClGFGAtJIsX8XyN8I9xaykUHcRDgX2lLDQVcRiVyn+7lJlthldZUpzpFvdpFsKSLflYrZnk+jMQutZR6j3Qnz9F7K0d41633SzG5rE9NpIoHMjiz0bWNhTgWdPsSU0ess51V7BM3vfhudTY/D7H0JK1qAKq8k2ZhI6c8kUkUzGsrClRF40iZhYQmI9KCjZUUpHOFqqxP7y7xkg8m4OwysOFyDI2zsSwop2yOwXzmErtzK6Wf5W/5j2HAEx0FOaWhIMjsLv72Pp5huItGyAzougqRRz31Zm2wsJdYttvzqA6J2FinnS7VkkOnPRugqXr6V1WXZ9ol2kbw6pVrH93Yjd7unaxkzPW5jv2U5kTxnx7gLiXUWWuSQmTGs5sdZKoi2WTyO+jfgClrlUgN6Vi96VTbxrHVrHhWida5ZJNNdKCrW8iXDrm4m0nUus43x1XLz7QoyetaR6sqC3CPaUwZ5i6Cki2eNSzxPtzifcnUeoJ5/Qntw/okhPrvJj2LUBdm0m0bJBaadZ8UkGyjk9UM7inlLlR9FZgtnmxtNejveDTfDYVyEzq2Zc8WCySEZNESTTM1n9qEqwQeYSUHPAyJgFMaDtyQGFPeRPoRVy89WAyd9egyyDwgGGAwirLFqcMGuCaqfe6vlbOdoBiClDVMW3k/GrMsWPcZzQo5/lzGc6ebarglBjIXRuhLYK4n2bWOouI9xTSqy7eFUU73JBe7aiTHuBkrzJ9kqMjg3EOyuJdxUrHyfWlaUcdHHU/T1uP
D3lLPRsxttdgdaejdl+gTJhku35JDrcGJ2l6nytu4pY9xYi3VsI7dlCsG8zgf6N+Pduwje4Ed9QFWe6rNzH6Z4SzuwpZqa3jJneEoRRZ/tKWdi7gYW9FcwPVDLXX6a+l9/P7CllsdtNqrecdFcRiY5Com15hNoLCHQVEuotItxfQrC3yNrfU0S0pwi9u0iBOdlZRLqtlExzJWZLFVpHJYEeuUclswMVLPaVEepxk27NgqYLQbY9JSwObufY+xsJPHQvxI+rhT/DagWqJFEzoeYPVh0qnSrLRygUSKeqjrU6X76WiMzfP0DssJ74Fyu0howXsIBhgUN8CecIZyu2qFAsmSSeThNPmWoOWjVBmRGGM0/AL77KsX9t46n9F3O6w0203QWdpdBdTqijhEhvJdGeYtXx0vkvl2KKWYpJdhSTaSuGllKbykm3laN3lePtKWa+z83sgIv5/jy8vVmEe9aS6DxfASO9ex1mSzbptnzMrnxSfS4Sg0XEhyuIjFUxt7eS0/u3cmb87cwcvJ75W1tZ+mA/3rtGmf/INOZ370V/4F7iP7gP44efJ/ngl0j9+IuY//UV0j/5Mvz062qrP/B5fP/xUc588Q6euecmnvzIAf5wRz8nJt/J3P4tnOyt4GRnKTMdxfh6KtH3biWz/2IFVKOjklS79U7yXplWi8SJj3bnEugtZKm3ksU9W/D2bCXSWYnZng9ta6BnHVrdPyEROnPsYkKjb+NY18WEpnfCN2+HM/9F2jyhpmoVq0F8SS1h8b70tQjHsyCRCejsUYoCDvHa/741iAMQx5yytgIQmQjO+k/A8XyAOOCQBtRUOtCad1eNGpRZA71HSXzn4xw9UM+xfVfyXFsZsaHNJPZWqmiS0Vem6qBifWWEbcn4csEhx4v2CfVsINwtGqOcVHupKrVQ5RYSnu0uxttTynxfKbP9xcz3F+DvzVJOvNnxz5jt5xHtKFQOdaC3iuC+LQRGLsY/fQWLN76D2Vuvgf84AN+9HX5yLzz2HXjuUVg6DiEZHSnj1kPWeHZdVqKVCNCfIRn7LueF/OA/BY/+O/z4bmJfmWLu7g5OHtzByeGrmN/7Nvx9VxDu3I7esZVU20YyrZXQWm4JgdZiUh0u5YcsDmSz2F+EV4IEXRXEO0rItOVC6zqMhjcqX0zrkQBBKaHujST7Loaei1jq3U7gK1Okn/0OJGfUNEWJpEnAgGDaWqBKcGCFciWaac/Q6Mx9JoygzIdXw7iyrvk3NrEEIHYo9wU+h0gOAUYsqas1ti2QpNXU/VpcxzBkjiWZORD8yTAZAUZiFk79HP2rt3D64E4W979DRYciPZuI9FQR2VNBuLeEUL+bUH8h4b5CIr1uIr3Fq6JwbxmhgYsI7NtOaGgL4X0VBAeK8PVk42lfy1LzGrQ9Vpg30laI1llEvLcUY28xWl8eS31uToxdzLGb3sviv7ahf3EafvBJ+M234blfw+yzEPFDLAq6zPUlQ4nFiXVyRLIvs+OtklKylqBMYC1zZy2A/1l4+mFS3/8c/k8eYPHWDuZGriW4753oA5eS7tsCPRXQ6YaOPOjMJT5USnS/1F/lYu5dT7L3QrSutfhac5nfXajMQ61rE2ZHBbQXQ1shtBdidhYjkbRj+7ZjfGUcnvsRhE+QNmJqfi9ZWkfm+1KznCnr24CUzOIijrvARozrV//zGgGIo0EsA8oBh4BCZpaV5Q3ipoFuRLEmfLYYJGnGmUv41YqzSgL9/rtEPjPK7ORVeIYuITl2GWLHG50Wad0bCPWV4B9w49+bq8gCyWoBUkJo72Z8e7fgFZ9gpJLAeAWBURf+gWx8XRdi9rthsBIGt2Ds3YZv8BIWxt6B933VLH1sD8YPP0ryv78Gxx4C/x9AZlrXRDNEIBg5G/wXv1XW+FieVtQywxWLCAOthjIQ0ZJoyYwValeTLAgYF8H3DMz9Gn79NTJfO8zcjdfybO9G5nsr0PZvwhypwtxXQbK3HGOPi8SeCzH6z
iW+91wi+9bhHShmsbeKQM+laB1vISPRw5YKCyAdORh7soj05ZIa2Yxn5C2EPjEIR36gZoGUyR7mE7AohpcpARd5P5lXQISpRjIZQktbK7682lOXvgYAskIa2supiea0oCLbNEY6QVQLocl0OCr0JwkliWzEVJ4jkzoGj99P7GNDLAxchlcy2AMlxPaVou8pJdlVCe0bSXdUofWWEtjrYnF/LovD2QT3uoj2Fa2aRFJG9pSgDVcR2l+JZ28xnoECtJEiOLABvS9XZdElJzAz9nY8HxtSfgKnnrKWbwuKiSTTg9raQGL/GV2tgmUtj+CYmxZWBCDin0oCWvalrVb7EQmsEttSki5zc6mtRAXl6iLDpdjwGOiPw6lvk3j4Q8x+dg9P3nw1j49cwrHeKlLid7WXkt5bTniomDP7cjk5uJ6FoXxi+ysxujeT7LiEZNtbSXRcjtazmUi/i+DQm4kOvgm616vk5cz+K1n8zCippx8EM6T632OkCMQzhGPidwhARAoY6EkfYdOrVl5XczKvtgH+gvP+tgBRkQp7PT+pwzkbrX0+QJKamhldqVgjjBlcsObXTcv6GsfxPPYFTt3ejm/gbZh9WzD7iwkN5OIbyEbrc5HsLoH2ckWJnlIFioXhXIReKUCMgVJCnYUsteYQ6HRhDFVhjm9BG97AQn8R0ZveQuTO64l/aQR+/jmYewIkiGAzpRIEUhKehFjcJBo3iCY04qZGAs2eBURmApGAhQWI/8mtU8Yhgb9oKkM4lSSc0YgisSUfCRaAU8AxQDTcb0g8803+8O338+Rd7cr8MnsvJdS3nfl+GW5bxtxgEYGhIuJD5eji8HdvJdJzGYHeS/AObMSzz4V3+M1E972BVOMboK8cfeRijo69g2c/PUbm2YfU9EwChlQiSTQSJ67ZdXWyTHVGpuQOqxls/r5LTQQgCiRnnS0l1VZoEGEGNQuGWndcJK3M0O6B6DzEjhH41Sc59slWZgcuJdWzFfZUoHVmsbh3HeEDLqKDOaR686FD7GY3qT3FRAfceIfcLO13E9orf6+OIgOl+PaUEeqtwOgpJdNdCj3lsHczxtTb8PzLTvj5p+H4g5A4LXljtU6JP5ViMawzGwgRNDQFCN1MIsuzyfuKNWGaGSsq5xTwKfPCWbZM5hqWBXGseiWnGVe3XVn45wRJ4qRFi2WiJNJBYkkfkZRHMWWcKBphvJlFwv6nmP/mR/D+6xBnRq9lrutSol2byfRvxezfRKyvAt9gBYv7y5gbKWVuxM3ScD6+/XmE9uUR788j05YNXW6SQ5vxT17Oczdew4l79pF54n5IzEBKSoV04ppOMmXlwtIk1eJ7ii9e5TDW316DOAARBWIXLQqTiGRVklJpFjHD7BIEWedCZicJncH87bd47Jb3MDu+lVTfRugsJ9GWh6f9As4MrcN/MJ/ovrWk+9ZCVxZ05ZDszVcACQ6KJikj8goBstTlVgDR+qvQ+jajD10Od7TC9z8Kz8oquIvq2WViackfz6QzLKUFKhLUzBCKB4kmIyQyoiXOJkOV7y02j+NbSORGrbEh64s4AJGrnHVWpSkdLfyXbOXiGbWIj5irTjLOWqJN1i6MabJMnMwSLzPoy8TYsGgmmU0nmFeGWIq0/wQceRi+fQ/6HYOE915NqGWLKtgUcza4rxDvcBaLYxewNHoBvuF1BPflERkoUwCS+jTJvxgD5SQnt7M0spWjo5exdO8gPHW/NfF2Okg6k0A3TeIpazHSjCwrZ8gqodY7v1r//u0B4jDACwCiwCHS1JnNXCIYRggML/hP4v31jzh272HmhreSEQ3R6YIOF+E9LuYHCzgxms2Z4QuI71sD/edC77nQvwZtMJfwYAnC0JIs1KSj9hatirS9LpjcgD5UxPzQJvx3NsGDn4b5p8CIKZQLvrUURAzQhcdVT6ZVLZKmiZ0vDJ5W852bmRSptEhHK/n5ohpBhIhNZ5ljZSO+jH2ZyC0WISMr28rAfclkv4gfG
E0k8IdlASA7ky2HxSGyFLAEl0j5yAI8+RDpL91B+OYmYvsvJj3ottp+4J/IDP4TqcE3EB9cp9o6vHeTigDOdJYQGChH31tKRLL2XbkEhysJvO89nP54Jzz9HYgdVwBWII2nSciO4DlqC5BXCx0yu7u8qwVCJyehQgbqO8dRtjpVDnT8BZHo9okrStXVdV7498t+eOlgub5M/iYmlTT+HERmQJ8Bz+8JPvh5fvPhcZ44cAPcciUMCkByoN+NNlLG3LCL40NZzA+vUwDJDJyLUHLvGqL7cgnvKyLeV0Wytwqjv4j4gMvqtMESooNFimKDLrS9BcQHLBIQRQdLCA+WEdpnkX9ISr3LiN1VAz+5C+Z+CvE5VWQnTq/Id6nkliWRRQEobSDOpvRwRhxhXUlwicwlUjpGUkNPxRFzSyI5MuRLcmEOOf3hbJf7ZYXHptruL/1bhI+Ej6XUPGaQihsqfK4nU8iiQ/KEihclepZIk1RIF7SnIJqBeFqZPoYCV8oSCoFZePS7ZD49RnjyCuJ788kMnAf9b1JgSQ2st9p7oJLA3q3M79uOZ/92fH3l+NrWo7WdR2ZvLsbERmYPvgX/v43CyR9BJqiESzSWsCbhcBpFAdriWcWTTjvL1uFRtSti56yGtnj+zzPnOdIIEuZLRHzWRMpGkIweJRQHX8Jad0KTq0lvSLw9IYVkMjm0tVCNDGlVK4DZW8GQREaU8ycxBzt8n5Sl+WxGERvbuaSEcWUFc7VMmJQTqHU8vJCYA/0EBJ6EmZ/CE1+HH3yI1OeG8XygnpMT7+TkwEai+0tIDuaTHCwkPlRIdL+L0IiL4GghoZF8tOFc4vuzFen7c4kOW8doQyUqY53oySLZl4e+v5zo+EZi4xuIjZYQGxJgZMFQLum9uUT6i1jqK2F+oJyF4U0EDmxn7rZrmPv6nYR++6AFYDH/TA0jFkXTk6TsUv5lIbTcYbYQULkfQ6U6rVUUrVYQplT9smIrESvhCetazvmy1LSMo7BzI85WKjdtyiSd1W1F4NjHiaZQJHMi2ReWrf2gspF7Oc8hW/lbfSQULLNgJERD6giYxPSSY6TfVWjNG4Hjz8ETD+O9o5fZ4Svw7CmFsUoV2ZP2jPesx9hfgXffpfgmrsI79lY8feWkutfAwAUwlEVsbw6nRyvQ75+CpafsnE8aM2pYkb+krpZ6ULP5y4vI/QUrK7MG0lRKP69MPC+/qv1Sf3pzjrycNL6KMZuy4IosmSXrOFg2pyzMIq6gxdEyCCmo1vjLxCIktJgFBFuxpFMmYhsSF3tCA01UtzSmbkkXZxZzZ/bymAf96E+JHvkRoce/jf8XX2PpwXtY+vYdeP79MIEvj3Lqrnrmb78e7/veReCmywkf2kZ0spLoWCmx0SKL4YXpV0H6UCHJvhxM0RLDZUTHNxCcqCI4WkpoqIDI3vUw7ibRn6Wknj6xhfmRbfxh/3ZCH2uFR7+szD1JWjqfRCJBLBZD0zQljZ3vX3wrvWnZ/zJ+XiJVKxnTAYnDoMK4Vu/rloMumeWUQSadIiMqSqSVSB9RVbL5c1slzUTQ2bafLXUdPpNnEZKntLFjD2Kzw+xqQRxL0zjaRiaxFPmZ8SVhdhGO/Abzm59k7uYmTvZvwz9YRmaiBEbzVHLRN1BFYPhyAiOXEhnaBMMlMFkEY9kk+y9Q/ey9/Rq0733YWutEVqtKGKTj0ioy1WzMjmbZAHEALw8u+zZABB4yq7z4U897nxfvmOVvzxEAyMupj7Lzg6QTIaJxHa9usBCPs2RohBMBjKQPM+0V11I9WIogaWbI8BzwDKSPgnEEor+DwOPgeRROPgJP/wh+9Z8gSwR/+cNkPn4Lidsnid/aS+TA9cSm30ls+h1Ep68gOnUZkaltxKY2E5ssJzRWQHgsh/D4WiJj5xMZfxORsf9DZOz/IzL6RqIj2asChwBK2+9SmeDEUBnJkSLi48X4xzewOFbJ4nAxS4NuVQ+VGK2AkQLM0VICt1xF9
L4x+O13rYnolJMr89IliMfjpFIpMlK6ncmofadpX3wrjCzhSyHLbBXmdCT4SuZUHa0Uhy3BJR8kmfCMwMoKdr7YViw6iY4JU8jvcm2HrCSb46BLyMAyQZbVuxxonWidZONOvhKRICR5KusJFCdawJRdeXjh4XgKluYxHrqfUx8Z4pnxK5kbLCYxlAWj64n2riXen0+mJ59kr5vwyDYiBy4iPOrC07de8cLMyEV4b3sP/PgOCB612wuloSWJLIsiSTDheRpkBQqsuj4Bk5AVKZRXUzLkxTtm+dtzfAYE4qBpcUjIEgGiQXyoiZ5lXTtZjEXq95NnVHUskT9A6Aj4nwLvf5M59XUyz34W44kPE3zkfSx8e5JTX+jj+CdaOHlXPSduuZaZm65h6dA1BKbeTXT83ehjV5EYfhfsv4K0lB60rrOofS10rCXTfSFmz/kkxLEeW09mYh2pqTUkp8/DOPBm9ANvInbwjcQOnE90NIfYiGt1NOzGGKtSlBgrQRsrwj9RwcJ4BXOj5cwNl+Ef30RkfBOJySrSH7gavvMvajkzlctIWhJbgBGJRBRAlltWBJiYpC/1UWgQ6b1CggtzvZBWMqpjPigT12JKh4dfbCsOdixpoKVSyq8xMk4pqMXcsqSoQUwZuSJd5Vbq/nIxYXKH5L5iItt8L5aHkPxsiWo7BJ0W/8qW5vJ4coK0kyyc+uzPCX39Vv5w8F0cE59utABz34UwKIGUtUqTL+zfyqmxS5gbrWRufwmBqYsIT28jNl1B5K6rMR7+FETFz4OIWPzqGawckWJ4555yXxsBFkAERJJ0PeuHOL+/VBedE08lEScrbUSs9fOiJyzJ/8y34NHPwoN3wPdvg/tvIfXlQ0TvGWPpQwPMvL+ThVvqWRi9FN/wJvwjm/GNbVQMFZjYiH9qE/6pjYQObyZwuArfwUoWD5QyN1nEmYkCTo/msTCUA6NlsK8Y9hXBYBH0F5Lqz1cx8uhArpLy0RE3oVE3ofEiAlPF+KdL8B8sJ3SgVEmaVwKQ1HgF8fEqgpOb8E5swDfhxjPhZmZqA3OTGwlOVOKd3Er0I+3w8/vAd8Raa1ySe7bzqurCEgkr4ma3tmiUaFTc9D/zkU50ACGdqrhcfAMxfSSDbPsJDvfLMSIsbV9PggGJjJVDEfloaRFHq5hKeyjZmZHsQep5f8tKHWFSyh6I2KFcuc3zJPELASIxhhf4RkozpXTSMlow4YOMU0Wlq+XvTPFX0mIO+sHzFNGHP8cfPt7PsQNXkh7Khv1vhulcMjeWEzq4lYWJS5mfuBzPgSuUsIrfUoV5sBD/SCGhuxvgN99S682L5SPPIs8szaIwYfu/0qwWCXjEz3PIEir2j3+mc+Ac9VKiNdIRCBwn9N9f59i9+znz/muIve8KEoc2Yx7YTHzqEiITbyE49jb8I1cSGroMfe8WGK6CoVIYKoEhqTsqJL0vj8S+XLShLEJDa/HvX4t/bC2+qfV4D67Hd2M2gZtzid5UQGK8EEaKyIyWYI6VKDMmNVZOcqxSRTK00SoiYxsJj2wiNLoF/8hWvCNb8I5sVcCMjJagjbpXTfGxMiLj5XgnqlicLMc/6cY37cJ7eAPew1uZP7Ad874BePJ7EJoDQypKlz0Ba3KOdFqZVIq3bPPKMbNeqgfOduJyb2LNdCdOthTnOSTOtQUMcYSFKcR6ERNHysPjCYmAxe0QsZSKOBGbF9864WQtIwCxAjGOLyq3UcyzUhLb9xZQOiFm59klEKFAmohjGCFMQxYp8trl60FSpoDFHs8hyU0zCAkv0T/8kvl/uw3/eBXpobXo0wXoN1eg37gR7/gmTo9eyszEJSyNFhCbyiE5uZ7wcA7+qYuJfX4M89nHl4fjOs8ijynkyBILOKI1rECI1WK2KeYIJTn5JT7npPynSIdnIBmA8Ck8D3+e37x/J8+OFmMeLoDRN8LIG0mOZhMdLyI0uYHQpER7yjAmpPAvm8DeLCKDOWj78zFGXSQnX
KQni8hMuUmM5dqUTWIsG2M0i8ToOoyRtcRG1xM56CZ4qJjAwRKC08X4J4vwjxcTGCkjMFKhIkcS3ZDx0+GBzeiD2zGGLiO1/0qSIxersG5srJDVUGSsSDnkgZES/KMFeMfy8EwXEjjsJnmzm+htF6F9dRKe+xnEJaMLft3qbmlXMyVh2hSmaSpSOZuXaOwX/mR1rGPDixx27CdhfWdfutkyfWTPAYcDEHVN5b+IDyN5ColWSeTKIJOMk9AiJPUoqXjMmjBBQrLyezqpQC3Xs3wJxyKRu8i9rXs6v8tWgUcBR7SaDV47IJYwIW5CTGaXkSy8MsAkT7IEyNqQHtLxBTJSjStMLNp39jm0f59Ev+sqZqc3sTRRijblUn6nRK/OjJah7V9LYug8jLE8olOleCY3MHvr1cS+eYe1arDSThaopT3l6eV9HLJmQHkBQBzwy/bPAURlZePyItKwUcK/e4AnPtzOsclyuNUF+/8X7P9/SIydS2wyj/DBUsKHK4gfLiFxuJTgeAH+cReBCTehCTeRiSKi4y70CTfx8UIFltR4AamxfMyRPFJDOST3ZZMaXI+2fz3esRwWpnJZmipQzOmbLiI4XUJ0qhxtagPahPgIm0iObiY1shVz+CIYvkRRemSbKgpcDTjknPC4G+9IMZ4RFwHJ8I5egH86i9BNbqLv38T83deC/7dqzUDhKbF5fRlL4lpdIZ2dUqaVOOcrASKgEd/kpT/SQ1bnOSsvWdEWcTytDLbjBJ/VWfYKthkJKftAPw3h58D3NMz9Fk78itQfHiH++x8TfeKHJJ9+CPOZn6rv1e/+oxA7AfHTYMxb0UUpo3cYXkwhO6sugBVj7awTLuFKu6hSwsiqzN72n+xKY9FEEviRtXZ1Ff4JQnqRdPy0BZC0rFNoB4bk9aNPwX99kOMf2MGxye0EDlWg31RM+MZStEMu0vvXwPCFpCbdyg9ZHC9jZnIz0Q/VkfrG+6wlLdSzW0EOAbGAYzmqpt7BgYsEQ+zkohz4lwBE5a1slS0Pju7H+8h9PHf7e5mfKCA2/Ab0oX8iPHIBwSkXnuly5ibK8E+Xod20geBUGcGpCkWhyQqEIpNliqITJQjFbNLHixGSaJFFbvSJfPTJvGWKT+SzTOOFygQTMywxJtrITVJotFiR/C1AjE0Wroqik26lrbxKjWeRPHgh8Yl/JvIvJfDgYdCetMpaZGJ4ex1ykaSW0HEkvLTyS38EPLpujWER08v5SA4jri2RSPqUByDgEEhJ5wqjiQcTjmtkpP5MCjNTpyH0Gzj2Tfjlx0g/cCOBTzQS/WgN4bt34L/rOjwfeA/zt13F7C3v4Mwtb+fY9GWcOHwFc7e+E9+d16J9tJbUZ3bDfR3w5X2qnJ2nH4KFp60aNwmjSoLWtH3S9JI1mMk4qRK1GVl70IgSNNJEpTRAluU1ksrmk5GAQUAMLClVl9IaaTe1aKdjZknEzp6pRDGoVEZEn4Hf34/vi8OceP+78N6yidShtaT2/2+Sg2/CHF5HeiKPuPieN27Ff+M2jOkNykcJf+cTVhY/EcaMSvGqNf3TYjyCpoSM5YNYsEla95YucwDidMaf2J4jLyDk5DvEZDCO/YLFL45wfHoL4YlczIn1JCfy0cZF/ZUpM0tCsGJmCRhCU2VEJmxgONtlkJQRnRCywLJyG5sQoLjPgmA8n4RNFkjyiE+cBY/Yos+jyTxik/kvCQ5R2S9F/uFcQuM5hCbWEphah3nXBnigD05/H+ILymwRwSFttBwOtxX4yymWE5CIMy+RLdEulo8i/kJSMVw4GkI3pHpXACg9KEwXAP8JMid+jv6rL+K7/zBzn2rm1O3v5PTN21g4UI55axXcVAaHy0kdLCI5VUR8sgB9rIDYmGSkXepv0ebaeN7y97GRPPxjpfhueQ/zd7TgvWeU2P13wi++Bsd/CsGnWewSOQAAIABJREFUrahl4jikjoM5Axlh+6gan+PLZPALMGI6BMOkQgaiMB1eE
pDMZ2TuEqvd5L2U1lUmoJPIdELWsrLvKTVoau7rh/j9wUtYGvg/MPZPcFMuifG1aENr0MfzCB3cRHi6HCZySB7exKm7u+Hx74MmGnEJUrIMuEwimCaUljCEFbZQ7SlWkoDT0RxnZdWfgAeoRKG8lLyIkPrI6Lxf3Mfs+68ndnAzTBXDaCEM5sCgC0bKYX8pZl8+KQmPThYRn7C22qSzLVLfy29/mkrQJsrRJiptKle5j9hkKdHpEsIHilRFbuigi9DBAgKH8vEfzrUpm8ChXGLTBS8JgJcChzBScjKb6NhavBPZ+O66nPSDN8HSTyB5gozhU1BQhpI0pmpQm3ntBJ/V2k7D/fFWolkCCgGIbJ19OVKYRq4dk7XSRTWpS8cgeAaO/hfmw/eyeO8w3o8147ntXSxOb2RJqgRGckiM55GeyleCy5i0BIs+nqtsdW0sh/hoLrLlcCncWAIHizEPujGnXDjHByeLmb3xLZw4fCUz0xfhnd6Cfsul8OHr4EsD8K33wZmfQeiYmi5GTC0pwg8SJYIMWJIKWystYbstjnqVOfsIJayRJSJYxMhRIJE2VOZZ1Brbk05jZsScFDEUJn3mlyx8dRrPwU1oI2tI35hN4uA64pNr0cfFhy0iNV4E09nKxD86fSVz98mIxAdAgVnK8wXESfyGGHp2vylgiI8mZDvqf9xdf/TNOVa1pl37lcpYGVnJpi88hv6Nmwh94N1KnaWH1sPgBTAsodkC2F+golWm+Bc2JScKEDImz5IwoZA2ZZEw9DJNFdmAqLSy41PlyvcIT5cSXgGQ8IECwgfzCR3KJXQo26b1hA9m/1mA6NNu/hQZ0wVwcxHBiRw8H3gLPHirFeJOLZJJBkiZcdWxwruKeUX6qQiRdKZjqyvD9I8a1vlC8iOiORz/RLYCFkVSniP952BOxpk/8wsSD3wC/z19LP3L1fimNiFCJzNZCFP5MJUH03lwoAAOFpAczyE+kWMx/VQeyelCktP5pA64SB0oIDFVsPy3hErTh4rIHHYr0Ji3lBG4uRLfjeVEDxRgjGeTHlkLYtIe2IL/pqsI3TuM/zsfRzvykLUwJ34y+EixoMaLCOOr9hHGl3yORN5kdsSkTsaQ3IP1+/OOU+UqkuiU8K+pJsEU40hpaIl0Lf0GfvAvRD70LuZG1pO8OZ/MbRXKWtCGs2HKBYfEesjDe/PFPHvL1YTvPwCen4J5EtIe9ERImaeqH6R9paHl+eTeapIQK8Ln9NOf2p6jXkYShAkdMx5VJM46yVk48QCeL/Vx5raLmBk+j8jUGzBvXoN2+Hy8h9YSuTGflHTYuE0TeTCRp+xFczJP/SYdJIwoYTztQAGxgxZFDhUgFDvgPksHC9EOCBWgH8jDmM4jMZ1DaipLkTm1HqH01FpF5lSWurZ1jnPu87f6QRcvRfFDbvzvu4zQNw/D4qNKikXSCbwJqYsS7rWljlOEqVryLwfICxteTCtHm6hybcl1BL2YR39J8PufZunTQyze9l6CUxswhBluzIcDa2HiPBg9F/avIbN/HYmRXKUlBBzxqWzVVsmDeaQOFZK+sZDMTUVkbnKp743pHLWV4+JTuep4fVLOuwB97P8lMfW/SRw4n8RNBRg3V2Hcsp34TZeh3XQ5wUOXMX/wck697z0sfWEvyUfvBd8vgTNqftyQDNu1eU/5LhLmjS9ZVdcyMExK9O2aNAGSEgiqUSypEIj6kJiXaFJxaSxTSIYzHIHffZngnVeRvHUzocMb8U4XEz6YS+qmfNKHRBMWwPu3MH9oI8++/53Ef/JBMJ6EzDwyGjMptWdyTUGpyDYFEvlHqgYkAGFVD7ywj1b+fQ4pAYc4ZGEy8TAyDkBFtERCmrPEfvkZTt1Ty7ED2YRuPI/UbRcQuvHNzNy4Dv9tJYqRxYESElA4JMBJKHo+QFaCRMAifys6mId2MG8ZGNKpFjhyMCdzSCvKgoksGLdIvhPwvRRA/n/e3gO6ruu689b62swksS1ZY
hFI9F4JgBSpFpdxquM4TuI4zngmseOZzKRMkkkmK7YlkUTvAEH0QoIA2FQs23KXYjuO4xa5SLIkq8vsRH29l9+3/vvcB0KyLUfkzGCtg/vKfe/du/f+73b22UcA+GlD2biLLXcQvffvXNOAxJLNtCrQlEYT482tMq2z4T14BJZ2FISMq5tp+hMfK+4QMF7xlwzAc18i8+URLs7+Oc93/ibnD9yB/0CTaX8+tgP+7kb4nze4499tJfP3BaQ+VkHi7hpi+2sJfqyUwN2FBO8uIbS/mMiBcqItZUQPVhBrLScukLRVEGuptOc65t6PHyyEljeTPfgm4i03E2wpZf1gLWsHGlm/R9a8nszBRuJ3KdOnJQT1vNT5qyyf+Esy/zIHF75pzTKcKlHZiwJ8zXOsQUxLE/TcZZfMs/KsiYDiwJIhkgkSS0eJe3OiTq8nHG3TF+HRY8RnPsgLd+3j/D2NRLtqibeVEd9fDa31cLDMUvLP3r2Lp0beR+KJU5A556JGscbhwf2gQKLrsdIUxSeql35t/l1nlWWZhMuZJxLmOMjUKS4x5q/8kMgX+swFibdUQlsJyXuKWLm7hrXWZvwHawgdrLQROVCJRnT/lRG7pwo3aoypYuyVUUXi7ioS98ikv2rodRubz68jcXcdybvcMCG5p+Y1LUT8YA0/bSy338nlEx+DJ78IMZXSLBGO+S17pOktBZqiw4YLYb6ry17JZ47napdeIfU//kRxh4oXFY/oT2BZWVkh9NSXiB95L/RWE7pnB/67dhBuKSPRXUeso45AazXLd5Wwur+SYGsdkY5dxDp2EWmrJXSgytzOtf1lFospPlNMpqPvYBm+gxV2zL2v5/6WcgKtVQTbKgm11xDqrGWltYLltjKbvF1rKcR/sIDAgZ2EDuwkcqCQ5b/dRuxAFfTshZ7biO/fbe5w4uBt+A69h8Sj04TOPALhZ7117EpbuwIAzY3YxKJueqOCO7cAK2OrK6XJ1YQhHlGdX2xjnkeTj6aok0skHr2XlZkPstZ1p5tMPFhF9GA1mdZy+Nib4WAeq+31fL/jLTx2/G8Jq/5PYLVCtE0+3gZABA7FKWGviu3HeZZ75Tp/yN1QOu1KTsRCmbt1FeXqiUozH/88q0f+C6udd5Bsryd9sIrwXXVu7uNgDb7WSnwtVfhaagwwAk3wYJUxUYCJSfhfIfB1JO5qMGF/NUB07mZQRQWAe2Sp6ojsbyB6T8OmY50BUlozdLDcgdPOd6DRb4dbq4m0ViNwJw6WE28pJ9xaZ77rM92/R+Kxh12aUKXqcR/B0DqBVNqyesvqGu/R44olcbZapDFxlzR4SsjccM+i67H9STCSKbLWmCFmWRbOP84LDx/l8dEPsL5/O9x1HRz4t2Rb30yybQfR9lLCHbUEOxpZ72hmtXMPa+1NJgTrrVWst5ThaynCBLqjkvWOatbaq/C1ubHeVk5uBDqr0PB3VNr7662VaKy1CBiVnG2t5XxHLcudVQS7y4h17iTevo1YyxYi+7eSaCsmcHc+vr8vspQ8B13GjL8vx/+3pTx5cB/Pz3+I0LcnYPnrNuchDS3ayL5KX28EyoYT5ZVkc1RZmySuiT4VXmqZg601d+ltNbSOJNLus5EL8J1jrE7+By7cs4fQQSmJeoKysAe2kr3rBuhvYqnnTr7R8hYufL7TpY7Da+4idCGObRvunmZpsqb+XmXVc3zzjtcJDLoZVw9qmXd3aznG68sTARKPf5zHR97PSx23EOtugP0lcFcByZYqp9F0we27CHgj2F5PpK3GzHuypYy0xsEKktLoLbtsDiXS2kS8tda5AXIFNkYN8dYfH5pA0vkbx7Yqou3FRDoKCXYWE2qvInGwieSBZvt8qKOCpdZSIr01JNuLyLTuINmxgzP3lPPs4T8h/O0vuDb7tt5bvrKIdWXWWs82GGwEExLcMACozGI15NJQXgmIKKg158FU0vbCSMdyRVspiKzC9+7n8tQfcv5gFYGOmwn2b
CXYcxOhru1EOgWOYosFRKf0gVrSB+pJHqwl0eJoGekoJtSZT7B7B76efNa7K1jvqmG9qwrfxqgg0PmTxuZzquwzq91VrHZX4OsuJdhVSLRjB4n2PJLteaTbdpBsyzeQyE1ztN9FtKXZSkKiLU1mXaQYz/XcwYXjf0zyiaOQesqcVFkE0UmeZUpC5imSZEa9zlQR7pSznaS5l2zcQCWrLbmU6Pm1nkWPkkvw1KcIHvvPLB/cQ/hgLQw1EW8vhJ5yaKm1xESwpZhLfXtIffEeiDwJkRDZtRgRrb7wem35jLFayeb3MiSvQsWmp7ZgShdyBSBrriGARfwuKWGB1vJTXPiHQzw19E4utdVDWxns30HmYKkJtsy+QCGA+Dp2EeioNYGNtpcZgdMtpQgoidYqoq31hFqbCbU1GbiiEvSrGe1lRDrzCXXtINBVTLCjinjLKwHi76/h/P6bCR7YQranEH9bHi9338rlz47DyiWX1dhEkNf1UAwPu10qxcdg0jWyE4OV4wqq4Zssh+q3tCXyN+9nfeqP8Xc2Q+cW6L/BgLHeU4yvu5hgZymR9jKibRUOJC1VZq1NsYh2baXEPYUQ6nRKQdbB11lLoKPGhmggxaDvsdFRahZJVkm8yA29F+oo2wBSsLOMiM7tKCTRnm/AEDiSbYXud9sqTOFJCfram1lv32v8o10ZpUJz5852N/PS7Hu4/PB+ki9/BmIvuSJFT6/YOvtE1lZPJtTAIhs0bFyxzq4qV+DQkECvqdOKiCsARV6C7x8jevQPWWm5Bd+BagLt5QQPFpFRVq+1Cva/mfWPXk94+l3wvWMQugShhLUz1QLnFa30FGpl1mJa3++h9qcwfsOC2Ps2kSJz57Wdt2rVlNXykF6Di9/g/MJf8lLb7SQ6a6GlwPLUsg7S/rIYwXYxS2C5ApB4WymJVoHDnScLIOsRad1FtK3m6sBh1qPMNJ40b7CjglC7rEu95celcQW61FA9F+/eynrrVoLdJbzcUs3K/B/Dy/9sltHSfj+FOP+ql0XfpDCgYj0tNHMzuaYBVfekVObqM2T+aZqzh3+f8wf3EO+phwFljPJItlQ469DqrlfCHegsM42+3l1qR1+XE2RZSCmieIvucZdZy2irrPEuu29peN23rLpAJeXFwWJQMH5AI3/T2GHvyVLLXdH3ihfio7JZOX4Zz9pUSLiZv/WeEqwnoc8drLCM5lJrOc8dqOCp7lu5cOrPiH57DvzPeVbatTbS3jnmlJAgquSQVUCahnbk1ky7uWAu/ltVWbspa7li6mTztLlbvpk/4kd3NxHpamTlYzutRCjTWQWtO4jdvZPVrn28NPZBOPeoldEIDz6to1e1SQ4gWuJqrsBP5/R1uhxZHPsTsy01JofNXWhErT9jEYfg+EUyXz/GhfEPsNzZRORgMdnWEmgpIdVWRqK9nGhHtfnPCgDDHdVEOyqJdZSTaK90o63aiBpra8BGey2x9uqrGx3l5jPHOguIttdZABdrq7PvT7bWWnYj2V1JrLcUf28BL7SW8MLgu+BbRyH2sosHbG4jR4DXd8zRWRv4JBPSd45msWQIdX20tjzrPyTw5RFeGHgnZ/dXE+2uhqF6/N3VXPhoIamWBrIHGu1owtZRTaizEn93OWu9Zaz1lrPWU4m/q5ZgZwOh9mairbegIDlxcB+Jlt0kWptJtjSSbGkg1VJHVu6H5joOVMKBcpsz4ECJc4v3F4GNAns9c7Bh4zs2f0/C6FhHzPhT6/FVvNX1uRHsrMXfXkegrY5Ie4XxOXCwiMsHyjjXfRsvj70P/5cOk9I2bEnFGBmLm0MpBeqebHoAkQCbrFrHlpAp+JxLJKoaLa29rEpTnod/WWB56kOsdr2V1YNVrHeWEuoqIbq/kExbPdGefTyx/y2saf1I+Ix9uRZ+ZjX772UlfxY49LPXuWpHl4qzqzKnW0hxacxQJkI8mySrzmJqjLz8FMmHB3l54Je4IC0lgBwsgBb5+CUeSCoJd
3pDgOmsNALHBBIDiKuWzT2+FoAkOneS6Cgi1tpMrHU3UWNoJQJIpqXatEl2oJKLPSU83Xcrvi8M2T2QuAgpr0jz9eFi42wx1NyBTMra0rhIUAyIeKnOF7n8xT7OzPwel9oriXfsINtdQLgtn4utNSx37SHc3kiitYGcQG4+CuzR9gbCNhoJdjTj79iNr/MWfB37CLbfYvctcCRa3RBIHFAcWDIH67DRUmvAEXjcqCZ7sI7sgWYyB1T7tM+GyjcSLfre3UTbmu367Pc7aj2elhPtLCPaWUKoqxxfbyPB3iYHkLt3krpru1ksxSyXWhv5Yfcvc+nBj8JLD7vFd7h17FpLY4pZRPQeO4B4JfGeJZa7agAhSSq86rqvyN3yPQ/fu48LYx/kcvcdrPcXEuzdYUWvvv3NZHpvY7XvLTx39I9IvPAZNx8jFNr+h2tuojYHyg2O/viD61wkL6Z6pkRXrce6IOKEslpxBiGVsvojbrHQi1/iwvE/4+Xe20mq4rc1H1oLoa2YdHspSdPsFUS6qgh31diIdNYQ66wh3lFFsqOCdJsb+ny8Q69fxeisINlZaL+Xat1NsnUPkc46Yp1VpNrqoKXGViKGW4t5oaeWix//b7CkAsSwt1xWMx6yllf3J3qHbLbdBfZJdSuRo61KhMuPE/nnKX44/C6WDu0h1qfrvJHowZsIthbh67mF4ODb8HfvJty5y1K48fZ6Um31ZFtUHqIcvzumW3eRbNM5TQS6drPevZvVnj2s9zQR76izCcF0Ww2p9hq772R7nWUb9X3x9l02Yu1NvHok25rg4G44sAe07ufgXlIttxJvvZVI+62EO/YS7thDuLOJcFed8TPWVU68q5h4Vz6R7mJLgvh76o23sbsLSH30ZtifDy3FxPYXs9JWw/Pdt/PywodJPn7CGuhp8ZTcHO2zmrMcOtqfqollRWwSz4FDHR8jiaRrqCehlnyqgjd+iew3Frk4+X6ebdnGxfYtpjwCrXeQ6GgmPngrz/btZfWLH4Nz33OVy+qpFl8hop3Jchm23G//hON1Lpz0lkjqKjWsVMXlq0PeSjMtLU5Z0VAYwi8T+5djnJn9AKHOJmgthjYBpJBsezGpjmLineVEuioIddcQ6qqzISJLeAUgAcmBqcJe0+uvd8QNIKWk2itwQtREpLPeQCIhsaCtQynREi7M/hbZpxe9+EqqX9ZDSdxrAYgaFsQ2Mi7GOJWOX36O5FcmuDj2XlZ7dxEfLCbRl0eo4yYC7fnE+htI9N9GsGsfa917We/eY3TUHEe6VcBW4FvhuUflcLAcWitMGYmGEkh9r7+nhnRHPrRth/Y8aN9JuqNwE/2dggp0y6XbZUOg0ljrabLfNDAeEBh3kVWCo203sfY9BoxQp65rt513BSClxLsKSXXuJNmVz/KBbfjaipECTHU0uMLJllrMpTuQB4MV+A7czPP7izk/9154TIHzC+ojxLpfcyE5C5GTfEmpBND8IZsLUTtWvysYNqsTVY1XDlnhs6S+Ps6PRu/kTEcJgf47ifW9zVnezkp8PWVcPHwnwS90w+XHbFWj1qRoN6t/JUAkIF6K0wq6nN3R78tPVMQvXzDnM2ZDPrde/fJ3OHf/3Sz33EmqvYp0e7mBQwBJdxSTVEamq9wAEuiuQ0wSUGRVBJBsuwOTgPR6gbH5fFkk+31p0LY608ahrl2mWdNtVXCokXDvHmKf/xisP+Z8YNMUihY1HbgRgf0E/fHaL4lGIrI/rZy+Z3mjQVJfe4DlsT9iuaWRbG859OWRHMonNlxOeKCBgK6vvYlE5257LOENdeWArRKRGgeSlgpXaqJyk7ZSUh2lxEXX7lJCPToWQsdWaL8B2m+0x9mOPBNeaXk7r7uCQE+VgUmA2jzCXVVk25QidQAUvaRYBFRZDSm/QJfGrldYkGRnManOfJJdO8kOlRnoV/aXEm5vJttzJ3TeCm210FHkZeu2kurbwaWeal6eei/+r4zD0nPWTUV0Ew0tH
thoLqHn8mpUzRG1OrbcBIR6GKtNlarP7XNKKCWeg0eHeHn4VzjX2kS4u5FYdx3J7nIYKuXiR29ibeJd8Ox9toZGa22UZRQ4za17DTZfl7swd3ruY1dWk+kiNPRlTpSEbK+kIPoMkfkP4ZPpb62GgXrSncpW7STdXUqsq5RQdxUCiLSejuHuCmNytiMfjVhHGeHO8qsaSgCkuhqtvit2oIB0Z4XNxay1NhLurCfZowRAJckj74FnPmlBuXzai+pkbskI3ZX5k69Bop/+lughJ031SM56aFL1q5wb/nNSg78BPU3QkU+m62bCfSWs99ey3ttMuKuZTFsDaNXigTxC7TuMVom+GpJ9TcS7mwm2NrC+v8YeJ7oaSHdVkeosJNN+M5n2G8h0/Dy0/zy03QBtN5Jpu4lMxzZSHZrr2WlaPiZXqLuEaGcR4bYdBFu2ET6wlcjBrTYRmGrdBl159p2a95C7muqrRNcR7Kpktb3ceGZ866rZcLGk/OQl6PxYdz7hnmIDeKBzL+H2O4m13UpaZSDyKto10/3/km7/OeL9hVzuv4VzY79H5stjth+I5E9S5/jgLIr60qXkQtkut5I397oBwnNyVAakoaJEsqrdet7iy5WhO4n3lZEarLbro7MADlWS6Skhcuo/WlcU/d6lgN+aWPxsgEg+7Cx9TDiVCLl8tK039laKCekK6LM57Cn1pqzCPw4RmX4vSwoauxtJ99aQ6Sol011MqrfMABHoqWG9VyCpI9RdgRgn10Aj1lFyVeAwUHVUk+7Za5mWRPsO0t3FRLt3E+y5lWDfLgK9Daz17IaH/hYuPm7VnBJoFUSL2BllM67hT59e9poMWhXrxSfJPjRIeOB3Sbfshu46aC8i0V1CoK+a9b4GfL3NRDsbnYZtKyPdmUei52biPQWEZRm6a1jvrGe5vZHLrY3Ehn+RcP8tbib8wA4C+28k1no9dL8RBncQ6akk0NNowbK/bzf+3lvw9d1CoG8vgf49BHsaCPfUkuiuId1dTqazyEBLWx60bSF04I1Eu24i2b+D1EAB0d4CAp078XUU4tOkZLfc5Arjo1zmWKcUXLl5AbJmod5iAr0CUj0BuWQdil/2uoqL9lLo2k6m5Q2k2n+BdO92/B2lXGrfRWL29+Gro1bek0jEyCTlp7j2RLYNQyJFRk0Kvepbiagstv501NmSVFecouW/wIUfwmf/gtWeUtZbtsOhCtBE4oGtZFpuIDhxJ0vfOkI4eMHm8TW/8rMBol8zJSqAyCf3AiRVYOY60lg1pkCTm573Yhah/OI34B86OT/8W5xr20usrxH6q8n2FMFgGdGeMoK9lfj6am3ocbSnhHRXgQ1ZGRH+qob83r59pHoarYgy3p1Hoq+ZyNAd+AaaOdfTxNLEb9rkkm1Kk4b1jNv5QgBJZLwNQD3Cv96DeLLkbZlH/DzZR4aJDP0WdN9maxfoqUPaP6R4oW+XgTba00CyS8CpIdtZSaq/iPhgAdGBIhM2X5c3M97TZMK+2r3bQO6XsA80ERxsIDBYjb+vlIt9jVw++iHOHP0fNs4e+2vOzf01F4/9FctH/4K1o39CYOYPCYz9Dv6+X8anyb2DSgTUgaohhhoI9exgvXc7Po2ePAI9eYR68wn3FRDtKzZeiYcacVn/rkobic5qIt3VxtP1vnoCvfVEuutJdNaT6qgl1VlNQuf2VeDvyDfQxfpLCHcVsX73zcRbS4iOvxOe/gSsv+S2V1PK3eaO0taFUzsebHg43ky7BFp013DuWYqQukSav5uBHz3I+vHfZFUKoDMP5NJ/JI9Y+82sDNTy4vT7SDz9oMm5Xw2wfwbTr7MzDEa6misWxC5MwNmwMLn3BZKUazOpb9e65pcfZuXev+OZjrey0tVEqq+ajPzjoRLiPUWEe8vw9TliCiCRnjJS3QU24t1lRLsrr2pEumuJ9ewhOdBMrCePcOcW4v01RIf3cXloN88PvoXApz8CF9R0IYlaNkW9mEoaKPKvX
zfzE8mo2zf3QAHlS//A2ux/InigzhV03pVHtqeGUE+TaXRpdwEl0V1FuqvSwKGj6p/8A+VEhmoID1YT7K8k0FdBeLCO8OEm/MO3sHJoHxcHbuPs0Nu5OPluAif/mNRn/ob0l/tg+XFYfdEbL8CqxnOw8jQsfx9e/BI8fi98aZDU6f9JaPwD+Pp/g/WOt7PWuY+zHRWc7ytldagS36EKAkNlhAdLLLGQGiol0V3gjRLEK4HE8auacHc9a32NrPU3EOytJdpdQaqrhHRnCQkpvZ5KQkNNLHXXsNRZRWigllRfubl0tLyJTF8Nl+Y/TPrpT0HqsqvgCF22tqZG282pYC3BSIdMLC3es5WCScLErIe1lbLY9N2L8MMp0vO/xtpHt0KPJlQriQ/UstZfy/nOBlKf/4htnaFCSc86/ET+6kUvBtFD11XDlZyYSXHoNYBonkSi4LLSKhE25Jnd0z5350k9usjzY3/AGWVAuivIdBdAXz7JXmmiUoL9VajsQ8dIXzmJ3kKSPYXEesuJyqpczeipMdMeHdxFsDuP9Y43E+0vwD9UxdnhvTw/+3vw9AMQPKPmUWSEDrOGyj5lbYbW7uOnkudnveFlW8IvEvxcK0tDbzd3KttyI3Tnk+gpJ9DfjG/AuTvR3l0ke6vMH870FhHvreJSSx3LvXsJDu4lNCRNXMx69834+rfhGypgbayWlZk7WT/1BwQ/9zGS356H5/4Zln8EIc0uez6HbmTzkIq1Xsra3FPNHc65FPczD8PXjhH+ZA/rp/6Wx9ru4OXBO1mbejuh6bcQOiw3rdw0vcAhPmZ6dprCS/YUb/BL9W3BvgazjP7+BuOfeJru2Umydyex3mLjdXBkn9VGXehsZqWnkWg4+/l8AAAgAElEQVR/FdnefOi5AQYK+VF7E2v3/xU892mIvuB6Msd91g4sF/ua/pYFSav61lkPFxXHrdG3XLJkWP2lBR25Pefg0UGCw2/D39ZoMd/64K2s9zeR6CojPfsr8MRJW1hl2bLXYPN1OQTphzcPez2XSVCmwIYKykT5K2ZOKTerk1n6Lv7PHOTy8C+ZH20A6dlOujfP5gCkGX0DNfgHqgj1a3a7mERvsZngWF8VVzOiskrdtUSG6vF1F+Lr2kp0cAvLvTfzo7HbWfrsRyHwDKgSQK37NVQ5l3VrloNZr1r0NQj0mm8py6LdWZ/7JC9O/h5Lw7eSHa0g1v4GGMoj1ZdPuL/WQKKYINTXbPeZ7Csk1b/d/P611hJLYEQE8v4qVjuLudRZyNJABcvje0k++B/gKx+BZ4+D//uuMZs1lfO692hNkDwMrU60NqNXeCO8RCPy5dWxRn661v6sQfSitwfhN4h8vp31U/+VsyPv5kzf21jpv5XwQJNZg2iLXJSd0J1Htnsn6Z4C45sUnK412F9DpLeOWE+V8TLRt5NY/3aiA3kEB4rxidejt7M2eDuXuvey1NnEuiaQ5V733ES2P8+U6bnOvayc+jN4+XOg5hCJgDU9F0A0TOIsw5rT+HpFHo1iFK2CxeKVVDrrMlsCidrePjrKub63s9631yo/ZMEZqiMy2Ezsof8Boe+49SuvwWRvHkT9ZF13buvMbh/QBag8YNXrqKElkl4DM9PC7hJXVEem89We9NkHSN37QWKDzdBbAN0qENxOoreA4EC5EUxEE1hkVQSSWF/FVYFDgBJAZJUiI40OeAMFxIffyMXuX+D8kXfAD+bNLEuRpiVU2nBFBYQJVUzJiniW8DUI9JpvqT5t7Z9JfPYj/HDoHVwYv43wdA3BwZvIDLwJDm0l3V9gbl9gYB/rg/vwDzUQGSoiPaQg+9+Q7f6/oP/nYXArkd4iVnsaWBv5VaKn/pLUI0Nw6VFYeR4CWqUXcQrSEw8THGUU1fFEbYDMRVYFhGuCLQ6aK6ljNkMsk7BCwbSWxKpTSuwiBB+3lXvh+z/CxdH3szr4a0SGfpFkT70VKtK5Ezq3G0hkSTThGeovxT9QaYou21WMM
kWyHAKGf2gn64cKWD0kt62KwKFmwkP7iA7eRrB3N6tdtaz0VLA2WEZwsNDiVK0/We65lewX7rGl3lK4ygpq2wjZhNwwS2LzI86TMWWtxtsJFc+rQthlu9RoxXVb+RHhz/xPwlNvs0VW9NWTGHkLl/puJTX7a/C9Qbdy9jWY/GMA2VjgYggV0QWQNWclcl3APTdFDFjSmgkz7apYfQ6+0odv8teJDtaS7M0j03ezaVIBISyNMqhjGbF+N+KyJgOVXM0xMlhNeLiO2FgjoUNVxEeKyYy9mbNdN/Hi8T+CpW9btaYW7jjbHCezvmSbZ+bW4hvRN/Vr3WxF7b1cHOatRReznGCKMVqWPI9v+le5MHQna6O3EhirIDqyg0D3z8Ooywzp/oKDjQQGmwgYQIpJHfoFGLoO+q9zQBnaYe9dGn03a/ffBd/9NFx+CdS+VJtYumpw85py+UYrJldvrOQFSC07PuGaiGpBqbSvRElDQMlp5Ki2184kSce1BuYyRF6Ey9+Fby5wee5veLHrnSy330aqvxm6S6Bnh41M3w5iA4UWo4QGHf/oLoLuQpL9+YSH8vENF7J6uIjVw2X4hlx5vWJEhu8ge2gfkcEmUxKrY3VGKwZ2wGCxxSO+sV8n/vVJp2yzKfwh3cUVi3gFIKKAZ02sY13MmtWFBRPNk0QduKx+68JDxD7xYWJDt8LoHQRGf8lcPsYa4BMfgOQLr/BMc/x3nlWK69zPu5jDXUAOTnpNFyEYeBOJVz7t+YLq46Qafq+Vik5dfp7ww/2cHXsHofE6ksMFMFwMh+rJ9lcbIeP9+SQGK0gMVJE4VEnsUOVVHquJDCgoryYyWkh2ugRmKjnbu5tL37qXpLZgyAm4lTXrfpRk8NYh6Fb1unZa8jSVYd1r2qbHJlXm2kZtn75lEq77i0qlg0+yNPMrpMbrjPkM7CEzWGpZqdBgCb7BcvyHqu0Y6S+0ib1wa4mluePDNxDr+39gVtm+Yla6dnFp6vdJf/soqD9yKkTCr7okL7ZQutMrstNLEnaR2/iXm3U2fgm+Wrh65aO5r9h8NF4bcVyhpTbPsS0rLr1A5GunWZn7c1b63krmUJNNtikFnBgoIDZURHyohPShMjhcTXKwjsRgnfEgNFJKcLTERvhwqfE0018HfQ3QI6+ikdSAOzc4Uk5wtIjo0M0k+7ZDbyGx/jpWFn8bnj1mnktOHjPaPiOrqUFHDyW7tMAvpcV8Of6+KkRw9xeG2LPw0oNcuv8veHbg7Vwc3kd4uBr63kx48hYuP3GvWZ71RIalQNqslegkmU/FVgSQq/9TQB+3jSmTLqsl+QtFyTz1CEsP/gXnx24lMFxKWqa0Twvsy8n2FZAa2ElS5d6DHkCGy22WWTPNr2sckqtWTWSwnPj4DtKTO2Gshosjv8nFpx4l4CmZjXy1uR6qLXOiZcLn5dklVnpVR7lfAr1ZCiX2JI3am5tLXLJmz0JT2LJ3genbYLQYBhpNAOgrJSH/e6iapUN1rIzsYn2onFh/Hpkel3ZUGjw+mkf40E0wVE5q8DbiJz4M35kDdT5Mhm22WNfvmOUlAxTzKC70wKzrdYJwdTzUd+s7AllYkfepG9YXqhXt9+8n+sm/4/LAW0iN7iNzuIZY906ivfkWZ6WGS1lu3070UB3hQw2EDlcTGiknNFJCeKSE2HAJqcFyS/nTWwc9DeiYGagmcajczgmNFhAbzSc5tMMSOqn+Utan9xH+yt/C0jddBa8tB9e+NRc9d9/F4bpMgcSuVyDRCxvE0Au6mSiZ6Hnr65V85jTPzP8hL/TfYpk6+m9k7VAVz365i0T2slUeBsIZ2yrFyuGzKbJJ37UCRH58VN2RHCNFcU3uBF+C70xzfvY9LA3X45f57Soi2VNKpr/EtGxSbtZguRHrdYFiM4g8gMgnjo/l20gdqiJ0/EPEL71o/aZMw9iEjoRIvvmrAKL3tDegJyy6BblfapBpANF0rZUFrZJkBbc7t9ImPlLfm
MU3uQ9Gi2BAmrIe+svRvfkP1bA8Usf6WL2lT02w+opgaBccbiA6VoJvpIz1Q7eROPmf4bE5iD1j20/okgJagOix2dSkFfFpnY4uxpvI3SwTV4MRTQLr67z14i6DKfdFTRdegDMPE/9cK6Hp9xHpaoYele5UkBwsZKW/gKWhYqLDlabUoodL0ZDl0BBPk0OVBohsfy301ZoHkRrMnV9CdKSAzHgR6ZECkoMlluZeGmnkR3O/SeCbYxBTwzq/xZGZ5IoJbA4I4pOWnOeebxw9OrhmDHGS2hE5ex7ijxH6p27OzvwmlwcaifQVsDJUxeOL7ye7/AVXaRyJWB4jJYUopyiTuVaAgPabCKXFMKFWKk8r6Fbh4jeJfKGFy7O/wuWhSlY6dxLuKSQ9VGNDLlZ8qIzkcAXxw1c5hquI9FZZajQxXkBoZIelSnnkHtsJS6tdnYvi9Y81i/cqgJjr5dYnyJNyNNdzJ4TZHECiasK8ip+EbdiiJg+XH/gYvom9ZEeKwbRlFQxUkRysxne4hpXRWoLTjYQOK41dQLa3FA7vIjVcz9qhCi6O30Lw/j+F7y+4HrWpVeu4KBHVzyp2MJAat+ROKJvo/G9dp4RkQ2leLUD0dfZF+iX9oiaLFXsG3A6za98j85VDBMd/l8TQbWQPNRDpL7YYI3K03uK+9HA+qeFikgLGSBkx8XO4iuShGlLG7yrSQxoVHr/L7DzFjAIH46UWQ64N1bI03MgLg7dx4fR/gR/e56p/CZNKh60DpQXmuHUlWrPuGCYllxvuJUcb9R9Tlx7F0IqzHib7T21cnvkNLvXWszLSzGMje0l8tx2C33W7pwVd11VVIgmA1+hiOQaqeEyz7M5HVDcJBZbL8NKX8D30V1yafSsX+vIJDOSTGd5FZqjBxSCHSkkOlxE/fJXDA4jqnGITBQRGCvGP3Q6PTdtKPtMwopRUssBiluHVAHGvb86USPu4lpVeYki3lnAdyjUxZfe59G1emv0g/pE9pA8JIIUwUAoDNSQ1nzFax/p4DdHZBmLjtbZoS6U3HK61DNDZwTrOzv82PPMA+J51lkPdTuKwnAGVUQokuq5XWJBNbta1AiQHsmgsQcKv5MXKxv7rcl8StlOvD0JPw2NHuXzs/ZzpaSR4uIHUzC0kJ6tJH94OwzeRHd5pdDCFN1xDXMmT4ToDilOApcQPbx5lBqjE4E7S45WsjdZxabiB9dE9LA81szKhXmV/CuvfMIumWbqgJnvNgiYIxZIEtPmoaTSl7jUEkisAEX2Up0yoF0D8AiSfg0ufI/bFuzk3/m5e7N/NC4criXzyt+Hl+yB0zpltTR0pJLOJQmPA1f0zAstMW38usVN7QwRA2wpr5ZbM2+MnWP/EBzk/Usn6cB6ZkV2khxotQBcxEyPl1zAqbeIpKndtogDfZDnB+d+Cs582S2baN+eOGu2037cAolvPaU73MAcQ90xkFUu89I+2G7ZOWZeJmZZdgR/cy5nx38F/uJnkIQXaO2CokKyC1uFGgnKtJqoJTVSRnKwl0VdtFQbpkRqWBqo4N/F2/A/fDckX0W5VyZTbi1zrps8m4Ly3gaqzICLw/3oLovtTuiIUD5MIr0My6LZ1SyZtbkEC5hcvZU1SzxF+bJYfHf+PnBt5C6HRvTBZC4e3wMj1cPhmGC4iPVxpFjIx3EB0pJ7oaDWRsTIi40VEx4qIjxWRGCkznqcOlxMfLCIxVsHSeB3nDzcQHN1Damw38ZFaLkrZPTEBseeNF/6E5ntjqOmDwBHJlUIYODyQbEpQiHaK2BJqdapFbNotLfgkvPhpgp89wA8G38HFsRpWj94GT6gJ9osbKT9tuKZK9mu2IMKBJqiypu/E3iiJeNQaNlou2vc80X/q5vyRJtbH8yzjkz7UQHywhuRIHYmRSuKj5Vc19FnNzCoTFh3PJ3Cknuin/gR834JUzK7LLIdTJUbkDYAYup2PoocCiAmjnlga2+u6pzcCApTmGi7YnkwkzhD+U
gerU79O4NAuEkPFZIa3wKE80oPSms0EJxrwT1WxcqiA5Fg1GZVZDNSRGG/g0kgTq/d9EF76lKVmtVgzqOF13VjKuhovPdflGJsNIF6ud1PnFfe+g/Xr/S9L6U+qC7oSxk77EtdKJm9vMz1MaxdbpTUElDMknrufyx//c/xj/x6mmmBkC4y+EQ5vM4BkDlUj/uYAEh6rJDhRQnCygOBkvgPJaAmpw5WkDlfbMThcweXxKi6N1jgXeawRFKMNlrL2iT+Giw/bfiYCSMTbFTkmoklvmdVQsC4o5LIyjpfGOq0jyRFJezqGzkLoOXj+YZYf+ihnJ27n/NSd8MQshF6EkLrVO6Wv/Mw1AySkCgDjjITIbylUlZL7c/6BOpp/7xTnjt/B6tQ2UhPSMLVunmS0kfg1AiQ2ICJXG0D8c3uIPHIXxJ81Kyba/WsAousXMe0+vM+oTseCVs1S+5VilHU8R1pBY+QF1j/xF0SPvIPAUJ2lPpPDbyZ1eItpz9jIbkKTuwhMV3OpZ5tpS1kWWc7k9G4uz7yV4MMHIKGgXLu0OsuuBgXy/kU6m7fIXVCuosEL0MVvfUYjx/vXCw6dL1Bo+zaXtXOFBrZE3wiSJOpXax53LYpMNB2npt688HFin/pzVoabLDGSHruJzIjc5zIyh2o3AaSW4EQ5vqkifNP5BKbyCU8UEM8BZLiWzESzxWMrY2WsjpVZZTCHqmC6kMxoAWfHb4UfztoqwFA8ixbwqVWpFzo6gOi6cgDxKj1yPF2JqHWQRyvpAK1n0vp47Wf/wuf5wcRv89z078Czn4DIefArRPBCh/8VABEBjY8miRIqEdxlRmVZzJu5+AOWHnwna7PbiI8VkB1zmiI62kRsrIbYePlVjcRoFYw2GUAyR8pZOnorPLvgUoK2/NW7OF2gN/t/xYJ4cYn3+gZAdK4pU1ebZmWllni/DFllRCKw8n3WTn+A6HgzsdFdJEcryIxvsZEYrSEy1oR/0lkQuRdyKZK9FWRHmgjP7GP9vt+HMw9BTFsKXJkMywm96OcEXxci5lsazfnZnqXLzYM42l8NPLzPbNDFeRf6Xl2Hl7qzZuY5QKrmyWbr0y+Q/eExzp1+PxfEg+N1ZKdrLHhnpJHMcAMRrSuZqCM0WY5/upj12QJ8MwWEpgqJjZeSHqkwDyIyupvg+C5CU8XEJgqRC8pwBRzeCqNbCY5Xc2bud2H9Sbt/TYMabHVRqo4wLSiKeRbEAOLmgXQfup/cJKnOMsKqIkTLrtWX4IXPwsuftyXSBC+5LKzR2GUyr9mC6EeNSTltbZNsTgvq4izRvvQjVj71btaObUXZJgEkeGgXIk70WgAyVgEjDUZUAWR54S2e2xIgq15KuYvTtW0IgtwFuSqvBIi0qLkZ3mdkPdTcOKNcu24ivQTZFcgmrEo2ePp3iY9XkhitMx86PbGF5MQWYmMVhMZ34RNAJmtsXiAzVkFmsJzsaCOBI3cQ+NyfweVvubZDpvlEqNyfLjYXdG4Gh3MfROsc40XfawGIZEyf1y1vFiRl8DSZakLnJThU7xTXHmuWRVsmtfYtVr8zzMr9v41vdg/B8SZSU3tgtJn0oVo313GoiNBkqQeQQtZnigkKCALIaDnJkRpCY7cYQCQX6fE8A05mRHzdbgBJjxWyMv1Wkt9dtNWAiiC1qYEtqLK9F0Q3j2YbINHzV96XQKI72pBV3ZdcrrWXwH8G1IVRnek9vidto4fAtbpYLh0qAhuVxUOvRFkmWV6rcXN9hfXPvIfgwjbSE8UwXk9guN6IGpuoJj5RcVUjOV5pAMmM1pIWQE7/GsmLX7OLyajn0WaAiB4mCDmAvLKuTEG5neEdBBYtEEvZPuhx0iq3kfOTUYDwfSKn30VqopDkWC26jtTkzSSnthKbLCM0VYt/ahcBjaEimKgETZiONeA79g4y39TWYS9dabG0YSFUfKdgfNOw9xw4JNC6vBxANhhu4vD6/+m7xCMNPXb/5SpruCSFZNC0b
jpDMqJ9Gl3NV1Z7eYQeI/Hdfl4+8uucGb2T5Pw7SE40kzpUQXa0DH/fNqJTJQRmSvDNluGbrSA0XWE0yoyVkRqrJDTRbO5oZmIHjG/DvS6LXEh2fAeM7SQy2cT5U/8NVtRVft21LY2onsxZCtHFgcRztSxod5edo9VmK2LnC0O54SlPR1vJtFqnrhPj8rUBxE2oOR92g3Oe3Akg8qWNm/41Ap97D+GF7WQnq2G8Gd9IHb6pOqKTlVcFDoFKgpk9XG8WKX2kEt8nf5ek7wemDV1libeQRoTwgKvSGGvRLSJ66lcHWQsTEB1EZ7OEWmvu2h9pP1jLM4m6y08Qu/dXyU7m2TXEx6tJTu4gPrWN6HQhgekKA0hoopFAfxHYPZeQGq9hfUFroxdd5xO7rlyGKpel2lw57SYFxUn9bE7bO0Y6cXbCYRLyuv9JeLwCDvt+A6atu5CFdb+nCnJHpjjp+JrbxXajFiFkmw1d/Mzf8dzku1mefgexid3IYjJWSPzQNuKTRYSnyvBPV9sITVUSmxAAikiNlxGdqCc6UUt2PB/GbyY1XkR8ooz4RIkpICYKyEzV8OKht8D3pyFzxs2Qp5MkE25qM4dho8iGS6pXHY2kSGQhddQ92/2IcJ5M5N7XOe48RVtrpFi6doDEbPGit6xOv+RZkZxmMuD4zhL6zDuJzefB2G4Yu4P18SrWj1QSmS4nPnV1IzVRZQBhvJaUtNMX/iOpxHmb6jKOW5m3R5GfBBBHQyPYhkshCnrpdVcjKosjpyxiwaxpnaUnSdz3yzC5xZIOiYkaElNFxGfyCB/JI3CkGP9Mk9X6hAcqYKoSJnaSmq5m7eR74dIjZj0sRjNGea7Uj1kOd4GbgaHHV0AhhF39Xw4gpsjEOE3yqu5J5bCebtEuUcpxpQmTZp1Map2stxGnlWQkLpF+/iFevPeveGb47QSndsOUXKRtMJVvHkNsqsomTAPTTYSn64hPVpKdKLKhpE1istImCxFoJouIyw3zZILpMpit57JKWj75XyDwLbd5qZKLXqcTiZ0DcS5m87J9KhfxQOKx1cTxCv3c5/SegJGznTG0DEJrT66x1EQWRGJj+5YKGDmYZlxgJLNmV7T6FIGH3k78iCaVfhFGf5n16Qp8CyWEZ0qvASCVZIbrYKLBABL/yp+g7rjatsCIEPfUn2dBHCFkQVSyobUhTric0Hno1sEDSMx2kEgQsSSn61huMeHK0yTueztMv5nURDnxyRoSk6XEZnYSnNuCf26nA8jEPhKH60ECMLGV7JEqC+7xPWrmXfS5wlhdjOcimJZxVkOv/jhzJQhe4G6IvTqQODdSFlKiIffOS9/oorziA4UdcjWdIxYkk9GcTRS1EHVLYiWlZ1n/+gTPTP8WgZk9MFsGozfC7E4k9FIgKgwMTu4jPNVIfLKK7EQBTOTZMauYZLzKKZupAmIzBUSmK4lN1pCcqoIj9eZl+KZ/EZ5ZsK6Y4q+WO0sRS+w8Vjq6mKLRO1dcRd2SznG8lkA4emfScVKag9K0ne5JsZadp+RJ+NosiH4i7NUt2VoRXamG139VP2SLlC79M74H9hGd2goDvwoj78Z/pBL/qXzCs8XXBBAFhEzuMoBkvv6X5hKpKMQAoopbj9l64bUB4gmJBxB9TABxu0gkCXo70Cq2Y/kZDyBvIjVZagCRFRRjg3M34p/bbgAJTtxOdnw3jJfD2BthoYr1+z4EgadsT8L17BXmOsY5Qde163lOs+X0ziuEwFuC6kovrg4gBrLkZbfc1eusLtdYbtcGeFXMqeBVrVWTMdsXPpnN2PqSUEaWRWCKwvkv4//8X+Of20tmpgCmboLJmwwgqnaOjN+K6BGe3G0AscLSyS0wsQ3GC0iMNxKbaDQaxmbzDCCRqTqbcE3N1MNMoZXs8MW/h7Xv2e9e2HStopX9SXFYSY7WyXi1axZPuSSMc8oEeDFaMx1K3/vNauo2teWH9iYRraUYrimLJeK4qlepm9QVjnp5Z
110JrgK5z6P73QzkfGtMPjvYfRdBI9UET5ZRHS2hMR0xVUNaRdNFqand6G6oPQ3/952JpIFMYFTJZ4eSOF6LpbsnWnMTVksCaRbn+y5il54ogU4mqXQ5JQ2rzQmCCArT+N78L2EjpQRmakmOlNNcrqE5MwO4nM3EDl6M6HpZqITt8FMswlJcuwGEieauPTgn0LwaaKRDD51c93kG4spulwNPdbviY25c/SaZdqkgZQ8sGXQYrbe0U16msG0gycvGy/pwjedY0pUM2JaA67da3WPrqGF6KfH9jXWKTIO4bDtcaLYTtfn9nF3v2yZvfgZeHKeS/O/juIM5gvIjr4BpnaQnKogOtVEdHIviYm9JCbrSczuID0rEG2HiWKSE7uITzYRnykheiTfaBs8Us3KWBXRiWqYyLe6r8SJ34LlLxKN+FHxT27OSBRw9/cqgJgVcTQSONxw0xHO/njrnTIpuw31IVGJl+PDNRYrOoaI6B7hRXRx1cOKbVGWUlnGAmtzjXCsGCb3kZ64g9hMA7HZSpKzVab9FUO83pE8UmHfEZlrwH/irfDUIeuWKIFywuTRzNMIrqAy559urmLdJFtecJoTTgmC7iORWkcVpbZ4LPUCgX8Z4fuTv8ZLU00Ej1TAfBEcuxlmbyQ5sZ3YaAPp0SY4vMOC+fhiHWcW38aTn/rvEH8SUhHXpdSzwB7ZbNmAkVPVCd61KMMmbaYNZ3Rebuge3TZwcicEGC91vQkLDmy50hmvXkkf3PgSCZRgqHPcy3qmx/Zn37mJxxuAE82SxBKCk1Zpxmzb7PVHOjl/5JdJzVeTmLmR+PxNxOa3E58rJj1db/FndnIvoYVCQovbSR0phekqmKqxuZTkbA2RuQr8i4WsHy/EN1tCbKqSzFgJmdlaLszW4//a39iW1NpuIncbjre6MV19LprwvAJzEV1Bbe7WdZZS+y5oFoCuKI/NeuaaLIgxRBpG6BPh9E+/5S2V1LwnqQvwgzn8RwWQQjIzDTabnJzSsYbUNQAkfrSC8NFSfAsNrJ78JXhyzNYY54TCBEgaT4v/jLEyDXK7LE11BUSbCtxyBLxCeImJAljtuXfR7X+XfpHVJz/J149+mOen9xI6Wgzz2+HYFpjZ7vzqsToYb4DRfDJT+QQWannp1Nt47KE/Bf8/u7XQlkN1gqlr1hANbWwIonxht5+eSz++2qpIGwog7p7sS/RFHr/d/UiQ9D06Ry6Tlh97aJCMXPWfmO0t9xXvEwkyT36OCyc/RPBYI/GFbYSO30R48SaSx7aTnVFscgeM324AWDu5ndhcmQHDgvHpCpIz9YTnalg7kc/ayTz8R4tIzAggFaRmarg4V8rKlz4A4ccM2LonKQ9XO6fr+UlDVBCdHaB067KQOopUr/V3bQCRiJmZ1yo9XaT3i0Z/IVZFbpccQObqYD6P9Gwt8dldJGaqScyWkzxSdtUjfrTMABKY9wDy1Ajax0TXIZJoCByvBZDNbol47ATKASr3Hc790szrZZArkT5D4uzX+N6J/86FiWYS09vJLNxIdnEr6aNlpGeqbGUjM7Wg1YbTVQSPbmflvjqee+B9ZF7+uPWINV7mfkSCugFUWQyXTFBCQWlpCYGbzPQUkWcldK9OezpQXLlx933ufrQOxgmSA5IHEr2pm77qP31BiHD4svsa9d4JvET4iy2sCiAndxJazCO8kGcAYaYURm83gAQWClk/uZXYXCnpmQr0ngDkAFJr1mP9xA6CR4vJHK0iO15OarqSpWNFnP3UO0mc+bR5C8o+ilqiwZWkb26idTNxHUAcPa7oh591+9cOEAt0AsvFCHwAACAASURBVF4a0HNQDSDy9TU7uWTNE/xz1bC4hdTRSuJHaokdKSF2tIjUXBmpo+VXNRJz5Q4gC3WsnnobPHXYNSfecE2c16FGFFcsiLp7iICe9hCFPOHMvZYja+5oK9ekb5JrrtgtcxF8T/PiQ/fgn2qG6ZuIL9xA9MR2YvMVpI7Vw9EaO
FoP07eQmakjOPMGIvflc+H0W4g92g8KjqXCcppckm4AcRZDOzA5cAgoEgBPlj1gGBA8cus67R5ymkFKVOd54NHnlbJWulpAM8DbZJpnZtypV/FfQpnAH1hxIFWAHPHBE/dy+fg+Qid3ElwsIbJQQlru50wJjN1mbnZkoYDAyW3E54tMqXCkBFVDJI7WETlWR+B4CYHjRUSPlsJRxSBlpKdKWDtWxEsnbmftu4fdQigVx+aUYY5GdvOeW5h77FVK5EiU+8z/ZoCIE8p7aFLFZ8Q3ThpAVFK2Cl4M4jtWCYs3GCBiR6uIzu0kOpdH4liJA4mA8jpHYq7MCBicr2P91C/C04OWI7/iu3vh0StcLKUynWCIWBuC5Qle7rM5cNhRfXWk7lMh0kG5WW59gf8bo6Rnb4GZm2wSdP3kTgKnikicqAbFXEf2wuxe0keqic38HKkTb8Z/ooLgwx+G4DNYxzNxSmT0LkYa0c055DSj6zbzyuvU5KJLY+r6DPy6fu8ynel00uLcCvUOUBCudkculnHrXTwUXQU0ch+JRuOEoxGi6q6iOjW1Vlp5nNVP/QbLxwsILFYSPVZO5uhOmCmCsVtgcg/RhZ2ET2whPr+T9FGBxAEl7gEkuFBOcLEMeQkGkKkyMlPFBOcKeWmunkuP/A3EvmcWLCfsuv0NgdeDVw97yVkS0cVInruRn3K8RgsizrrOJwnW3Eo7/arlkj2AJNetSZdvvhxOXI+EOjJXRfjYzYTnt5GcLyZ9rPyqRvJYuQeQWnynboOne627R07IjWCeT29CJMmR9XAGJCeTzq/yAGJafJMF0mSe+i0ZktIxsjGf279Cu+I+/0k4/jY4VkDweAHLp3fiv/dm4qdK4KiAcxvZI3tIzZaRnXsTyfl/R+z4Vvwf/yV45iFYWbkCDuOsxF0mxeXvjcE5wc8hVs+lqY3usjLuE3aJuuHceXbzkhH53SmbrwqT0tq8jbkrJSDsZ3+KcPxrXg4EEpYWjWT8ZLNhL610kdS3/orLp+vwHa8nNl8FitNmS2GiCaZ3kVjIJ3piK8mFPNLz+aTnC0nOl5I4Vkd0vp7IQhXRhQrzOCw+mSknO11kivXisUouffJ9cP5+U845gOjWc5jYeJB7IXf0LInTjD9bQfwvAEiALGvE8RkjctpL5lwzkWgfusfvZW2+0gASP1ZOeK6K0PzNhBa2kFhwhBFxXvc4Vk7sSCnh+Wr8p/bC0x1uUcyGL38l4HXC4Pnem+JZ08yirATqp4FEk2WpjCtcTGsWUdYkAr7vwIPvg5O7CJ0sY+X0DgL3XU/slGKtW0hO3E5qtpH4bDGczCMy8/+ROnG9CU30kTY4q/0qvASHSaMuwAuk9VDXJR2UG7lrtDe0TZlK9zyA6PP6jM7JBV5Zl/uXACkolc3JDfUEU4pep1/1nzqze1NNccFO/bb0Q9EAnJth+aFfZvXELgeQI+UwXQGTdTBTRXp+J4njW0kubietGGUhn8R8KbH5GgNIdL6GxLEqMrOVruTkaDnM5BM7utOs0tp9byf5WDdwecPF+tcCxBFKZ2uIaD/97xoBoh/QlHzAupuIEcZMS/OK8VrXHDKA+I5Vw/EbiM9VWpYiOJ9HaH7btQFEBD1SQuRYNcFTzfB0K6TPmxrZUBgeWFyFqjfP8WqAmEBtAoho5lkefY/ejicypHNd9bJpsmqynDxD8rN/Dff/EqHT1ayf2kbw3jcSO7mV+Gw9sfFbSM40EZ0qgvsLWZ/8v0mduhHfXBmB0x+A5z/t9loxRomWHjhela7dDN7cfclF0CdeARAxW7PhueGVWogvSn1LdnNDDpyjyWsLyE8XHUejXN9oS0OnI07m1Jwv+lVWHv5Dlk7tIrpQDrM1lso1kBwpJDOfR1JJjQUHElmU2EIpUVmO+ToDSvJYFdmjFdYBhvlSOLKTxNGdxE+UEzrZSOSrfw7ZM0YH0ws/yYJ4vNywKAYIj9b/+wGiX48Ty4RYT/qNWQbnhJo56
JKV+QmR+fYCsZO7SR65gfBMuQVhMd3kwg4SC8XmZsnVev2j1Fyz1Ik6/MdryHzzv9uiJk3+iiAJW5TuzK6yOJbl8LSxCb64bzNfV5ILRj9PCnPCuPlohNbn9GJqDZa/QuJz/5W1kzUkPr6D0OIbCc/dQPZkDSzuIT27l+RcvblWodNvImiZnRpiR29nae79pH74WQhdtM4dmpHOxCOkUgnDhG4jlFaoriDbyZ6oamlgZay9+Sa954ot5ZopFrgyg6wO9loguBJO4Yu7NKeyYQnNxMuGaH/FTMZGNpu1phGvCYrNb+bcVytHcXM1Ya3I04XFniHx/S7OLNYRO14Es41WxY2s6dFtJI9uIXnsJtKLWwwoscWdRI4XE1msMJAkjtWQnquCuTKYK7KJR+YLyS6UkjleRvJEGZfu/RVY/66xIpFME405axzTfSZc4bW9uZmBUj5KUGyY5ddWENdoQUzCiGcS1oI+Z0HEn1AsSkp9YONr8Ogs8ePNJKevJzpTQXxxF4lTNQTnCzyAyM0SQF7vsZjssQoyJ2os+E1/408grd1rnQCr2lMEEjDcDPoVCyJBs4kiESznkohWr9I4r6Cthwu9Zn8qsfA/D49PEPjEW/EvbiN5agepE3nEFgvJnqgnfWS3ASR0Mg/fvVtZO1VOaLEGZitIHLmdcw/8BclnP2kd/qzDeWydYGgdXzyCL6vlrqou04y+A4mu29DjTa/ruQO/nCclTLyOJBbHZAhFde+OJFpfZDeogFrd1JVlzAgkVw8Q0ctau3qxTiBqUmA9gONPHuPsqSZiJ/JhugkmG+FoPtn5LaSOaW7kJjILW8yKRI/nEzpeTOh4KZHFcmQ9MnPlFt9xbAcs7iS7WAiLVbBYAcdvZv30bbD0Vbu7dCpBQp0WBPtEBtWpWkX3qxmoC/4/CRAZCi29EFnECAFTfRuCwSDx8GXX3PnRw8QXmkhMXE9itpTMyWbS9zYRPFpCbKHYAjbFIjKzr+coUDFfSWaxitDJEmJf/U9uua2shGbPdbTyAjkUShp4KterYVRcYieKkhqmXTwUGKmvIOLVdDaQ6PxYABUfJr/631iaqyB5qhTuKyZ8bCeZE5WmBcXs4IlSVk+Vs3KqjpAYfGQrHMnn/HQTa5/9Y/jBGCz/o9sLnCTBDJyNWR5QuUDWSVviN6kYSF0y5CuZPCgF7FkOA0cOIK57pLXv9DS9aVNtkJk8A2vfIqG9XdIqQLxKgIg80jkxZ3k00+JPu6JOKZ3Ui1/k4ifuIHpyB8zsghlNFheY1UgJJPNbPIBsJ7pYSPBEsQ1ZEiVgsnPFsLATjm8nfSKP1PFi0ov1MC93fQvhU03wo4ecxUyGScmdV0ugZBrNwUrvbeZbjqWeuvCsiGmNK2+96tE1WxBrTOilUS2TIqaF427f8PQyxB6Df2kltdBAavx6kmq1eV8z3L+X4NEy4oslJBcLrmqkFmR6nUaJnCok8MhvQ+AJd/+qp/EQq5lm5W9+HCAyyR5IcsccRV9FKD3NvbX5aCDMLMOzR1i7/zfwK7g8WWL3FZ8vILOQj64zdLyW9ZMNrJ1qIHq8DI69CY69gfjCDpYWGrh477sIf+0gXPoyiG4qlMzmaqOyrKvlaTpMUn2e1FZJpR1S3RvaMGdBZEW0j4abuNVOssJTStZUiQVtDbD+T0S+P8xL2l8ktXL1ABHwFJtph9e0RSH4iFoiQG4gS4+z8vl3EjldADPVcHQXmYUiEioxOb7NgMLCNjKLO4ieKDElFzpVQPREAZn5Emc9FreRPbGFxGmXHUwu7IGFBlh8s3khsafnIbPutHJc27ElkTVRvJhbcGhu6Sb+/R8FiMmV/H1ZEYFDWw2oc7hptYtw+eMkv/6nZBY12XM96aN58EADPHALIU0aCiDH869qpBYLYEGFcaXETu1k9TO/CivfdvcvcJg/Yo1trC731QDR5JmbgZX4vwoor3j+E9DiEdw8CpWB+n5A6ls9nJ+/Df9CGdn76
okc2UbmxPWkjm8lutBAaGGvAcVcjoU3wOK/IXP852w+QDHM2ifehf/Lf0/iBydh5QnH9HQuThCYZadlOhRnBLxq1Zya1A2LAa73sJ7p7KglGFQ2HHYZvsSj8OQQlz71+zzzwIcgffaaAGLOfjwqBFpdmCJRVQObcQudIfyP/4nwac2GF8HxalKL5cidSp/aTnrxBgwgSvkeLyNwqoTA6R1ET95MZqEA5nfAyTeTPn0Dkfu3E7q3jMTx22H+Fli8kezJKgL/Mux6XqmJtxoWqrIjE7e19IpBJQI5UdBjBxbRUu6F3vnfaUGkJfT9qkpVoy0pMRWvZdVHVcszn2XlsTYCj7wbTpbB9PVkj94I95fDx/cQPVZN4ngpqRMFVzXSx51Pmj1WQvz0Di4++Fay5796RUHYvcv9CFi7HgtkJTWeiyWAKA5xhHMpUWdRRLhXj80AcoDR7UtDxzQ5Jq1+8R9Z+9yHCdx3KzxYR3zuTWRP/QLpk1vMgibmbyG1WEXqxE7Sp64nffrnSJ1+I4lTNxK7txj/6V1cPvGLXLrv9wl/tRWefdAqf4mqe7sAoXuR6GkyUW6jHO1NfN6It5TCzcHFuRpypUg9C+cWWP38B7h8qonAI38AqeevHiD6cbl8aW14rmVVMes8qcXJcnYkC4lv/iXhU+Wkj26HUyU2iRo9XkH6vjxTHizkWWwRO1FJ8HQ5wfvyiNy7jezxnbCoz7yJ5H1vIPjAVgL3VxLVvNP87bBwo8nU0lfarMsMGVUmqLGGytxjqLFXPOEqEF7NSSlKDVeaYkLykzXgtXY1MZ8jnbGsogxHPCIOnQdeIBl8maUXP8sTX/gAlx/aC6eLYe4GMnM/D/fmw8ebiS/UGkASJwq4mmEAOV4Nx4qI33szZ+69lcTL/+BkW0DYAIhm+VUOc6UOX6DQXI3iEEdAN8MqojnC6VUv5bWhbXKkdmARQMICiAJmE9RlePE42X/4ANmPF8Dpf+uAcCKPrCzdXA3MVZJdLCB83434HthC7BPVhE5W2ERj/HQhiVOFRBaKiJ9qJv3J98BjnfDsKbj4bQircYRL2arzoqINSxbqXnVpnjHR01xaN6wgOp0iE3yJ1I8+ZZWwZxabid5fAI99ANJPXj1AFIOpEljzHxpErOOI1uPo+lTgmf3ufoKnakgeeyM8kE/8RD2h4/VkH1CM9iYEEHkAkRNVBE6XE7h/O5F7t5A9ngeL2+D0z5P4+C/g/8QWfA9UEznxKzD/Vpi/CY6Xcf5zH3HN4DLnXZeS/5+99wCvK7vuey+HBexEryQI9jKcrhLLlhRFUYrlOLFjRYmduDyX51jOs2PHzy+2FSdOHMe27Eh2JCeO7TiSZoYVBAESIFhBdICoLGCvYEXH7fee8sv33/scEBwNZ4ag9OaT7MtvcV/ce8655+y9/qvttdfWPimKEmn9TkZCxGrSQC4GvrICFyFI3hUgQc/qgUKNoFZkR/3x3p/5zp6nxFgJEa2nMWkG3g3w+0kPH+Rq62/Q9+bL3Nu7GjQgXyvA+4vl8GYF7H2FzNd2PBNAHGkQOcL/ew3ZXWUMv/4iXN4T2OjGQw8yYZXwJ6n7iOctQEJwSAmGAJEm0bO9Ez0CiNZsG7lu1JBWil2G/t9hatcLUFNBamcpmTdWY8D81XXwlxvwv7aO6d2rGd9bSaz6BSbf2GzylvzqCqguwH9zqZWuezbh7PsY3pGfgP4/hLuHIXnZaESbfBgkmYWKZbbVYMZPs3j3IN5L+vrr3G/+Ja7v/hRX/mIb6Vr11S+aiolzdtJNBFAMoHX00hvTJuImD2pC/aF0mL4vMfn6R0h+dSXUFJHauY2YUnH2VuK/XmCjU1/dgNUgG4jvKie1s8Q45Xy9BH9XLpm9uUxXlzG1dwupNz6K/7W/AV8thTe2cefAL8Jkn82yVgV4E8lThzikJLUDs0qjKWGouZ/3Cg6dG1FKulmPrQ7VQ4V8IR4wyzoVU5ddp/yfADPqGMMWWjgfO
MNG2GoBzg0YP0i29xe5Vf0Sk3VFZGqWwe4iUDRj52rYuQHvzZdxRbu24O5eP2ealk9TvRn2V5KQ3d/4y5A6C9nbSqNDm6rIbZVE1eOFzyAnUo8a4j1sTY+GBz2xtUfpfHlbijKNKe6uPsjEYLIbzvxH4oe/l7Fdm4ntrSSzbzXe3rW4O9eTfXMLqV3Pk9i9nWT1RpLVlST3lhDfk0ty7zKy1ctwDuTi1JYQ21/JWPU27u77EMO1f5/Rpp8ideY/wJ3/CeMHIN4N0SsQGzPBEeOiyOSTuZHthZt/SLL9hxmv/zATh17l9u5XuPS1DxNv1sKtertPn32cp/9fnRbyCbIcVJrVztZLYpsgwnAz0T0/QqZ6De6BxWRqVxPbWYH7xhp4vQp2rjf84O7ahLN7A+7uKkPsqsLQ7tW4e1bj7Kkis2cD7s7n8XZugzcrcXa+yoM9Pws3DgfFHMaIE2XKi5NUiXYDYPGt5sBsJFOC0pimM/f+zo8dUYRnBiB62BAkRgJoMY11euxcQdghYg2hNGvX6MjgNQDRhu7tOKd/k9G672GqutyCozoHdufDLgFkA7z5PO7OF3F37kAdM1eAqENjr6/F27ce9pfh7NxItu5HYKIR3CtkmWQKz6h7yRITZVOniYLGasp37qQnfasu0nUFQJXFlDa1/TAJY21w7U94eOQHGa3/AGP7q5jcW0Jq7xq8vZtgz0uw+wXYtwl33xqye8rJ7Com9WYuyTdXkti5koTs75oy4nvLGNtZxv3X13D/jY2M73uFaP3HSDV+H+7Rn8A98StkW36fdMefEe38KhMtX2Hi5K8TO/rPyTZ+GK++Cu9gCfGaNdza9SpXq/8JsbN/CpnLgTnypCd8D58bgOjBh4F7xoQROyjEapK0bvUxtednSO5fS6ZuIdmDhcR2FeMKHPJLJSx3rZ8TJXe9xujuH4aLX7fzX0wHAtEnrXQgSSyFMs2CMIX5Y0E0U3wdKAMB5R1ej4d5jRgN5gTCuQFdaDaZC4ZXDxlCnSHtcRku/1eGD34vt9/YTqx6E44xr4pgT67VIm9uw3vzRZxdz+Ps3mKkhrNnPXOi3RvI7N5GRtJmf4H5reje74NbO8G5Y6I+kvCKqsxokFByCCDmed+hd97tq/BainKmPDzVtVXXCDlmYdZNkre+yoPun+PSgRe5truA2IEiOFQJ+7QKcT18bQN8bRN8XZNfWyxpnuSrG+z3e7fDni2wax3+m3Luy3Dk4O4rxzlQydiu1Yzs38BI/YuMHHmVh0df4cHhrYzUljCxbxmxNyL4e5ea4yd27uBuzQ8ycfr38cds1XSL6Hd70Hf4Xs9rQs1Wg0hzPAKIA7d6mNr706Rq1pOpW0TmUCHx3SV4byg/7dkAkt69g5E9fwuv9z9C9Kzpc5Ut0+/LjEopcBIKRFum26xR8iXcQ0Vg+PnJzxeZYX7zRpe2mmHGHwnBYa4RRnoeuSjupBbxqlTMLbzLf8ndY/+Y4dpXmTrwMun9L+Lu3o6/ewPsLYU95fi7tuLu3kF2z1Yy+zbi7t0wd9oj0+pVUm+sxd+/HKe6kLGd3w1n/hhSDw2zChgCiRk0PUPIwCbi8+SOeU/f6Fq6sCjj4afTpNJZEloKaraukFq5Rfx+Dfd7/jW3Gz/JWN1WsjUVsEtOZoExFYxW3bUZdj0PO1+ANxTnfwG+qkVmz5tiD3x9E7y5DnZXwp4y2FcK1SVk9xeTqCliqjaPsbqlTNTnEDu8AKdxHhyeD/ueg+oiMw5Te/8uifbfgtFmM4v+XqI479oPBiD6TzPzY2/RIA7c7mZq708agGTrlpA5VPw4QHYpJL5hTpTZu5XJ/a8wffJHYVg75Gr9kd3xV0ZVLJMwOLAYkJYL54qCWgUCybsCRAfp+YxtoAtYBycEiMn7MULRLuSxSxxtRXdjxEdVCmIYRvfzoOknuLhnB6N1W3GPvIpb+xLerpfwd76Av3uz6QR37zoy1etIVW80lN0ngKybGwkgN
R+0E0v7c0jvX8bIzhdItfwaTN6dCecKJNZJD8KiQce8S9+8K2/Y5w9imhKdXoqEN8GoO8KIM82Yp0rAQmIC4lfg+k7iLf83o9UvMbVzMfHdEZyGlWQO5ZOpKzLmlHyV7M4qk2vkfG0L3td3wNdfwn/dkv5Of3ULib/cQPx/rYc9r8Gul3B2bia+s5LpXcXE9xXg7s+D/dLcq6H6Faj/AWj7DbhVD9mHZng10uqKZ3qpE01HWic91CAS3CbfY7iV6L4fJ1OzCad2BdlDZSR2l+FrScCbmwxPeHvWMRcSH8Xrt/Cw9ruh5/dg+oKZb1AQS+WKNNFqx143Ewh/41dbnjf3+C5MEDGDrPNNjdhAe+hPhbi1LmKmGohmozWfoOWfds2RERfJcXjQSKbvl7hT/z3c2rve2NvZui0QAuTN1/B3voK7ZyvZ6jWkasqJ799gHNTMMwFkI96+l40GcaoXk6pZzET1ZkYbfgLuXLRYN4l0EixKD1e0JeAKo0H0xzO8dLrsN8kVk+ikvpEdPMUUUSbNAiXbl7avJuBBA5z5fxk/9ho39y5j5GABD2vzeLg/l4f78pjYV0S8uoJMzQao3YovU2zvVti7w5AmIOWoOm9ux1M0aOfL8OYOM7ckXyZRXczk3gLGdxUz+vpaxt/4AG79j0L/H8Gdk6AFX1m7F59qQH3zACJPzFaqFytagCThbhPR6h8xAHHrVpE9VE5sT5kJVjwrQLL71pKuX8+DvVtIHFZ29Fdh/ApMJMzWcvIPZT2ofJMBiHwSw7wWIIGcfEcGiFgnRikLIhtINxGeIIIoiaDQWMasGNTKQaVsWDVmBv1hO5z+PA8PfZwHNZuYrN1IbH8Vmeq1ULcdf8/zsOtV/F0fxNnzItmaNaRqy4gfWE+iZiPZ/etxqqvmSBvJ7txKdvc6MvuWkqhZSrxhHXdqvheuttjZKj2AAYPmQSatsDMC4R375T1+6eE7SbxsAleFkM3MvECiPlL8TJVGAuFlR8ok8ZHpxbn9RUZO/xhjJ/8eE43fxcO6l7i/bxOj1WuJ1a3Bb6yE42tIV6/A2bfMkLdvJX51oBkOlMEBTaSthF1L8WqeI1kfYeJohAfHl3P/2HpuH/448bZfhbO74e51u1+3wKx5PUWkg4Vj7/Fh3/Yw48eZb3Rhm4elrBYjiBTRu9dAouYzFiC1BTj1FcT3lpvADLs2wt71+Puq5kTim/SBDUzv20h0/wtwSnuu1MDEPTN3qTuysA3sYIFDKiUYFv35LgqEiPHwTY2lIIw7S2uE6tKGxkIG088Gxyrdu//3SDR8L8NvbuDhntWk6jbi12+GgxvgwEb8vZsMSLzdL+PsfYF0TRWJ2jXEa6sMZfdX4eyvtAB52rZ6vdUee6pI7c0nXrOC9LE13K7+EFzYA9H4jEulaTWl/tkZ1PfQM2/LDo9/aFfraalQnKnsJAnHrs03PyqzypTCCUJd6rZQQWu2lxtAD9z8czj728Ta/hX3Gz7D7eq/yZ3qVxk9sI3pQ+tIHV5Dur6EVF0+yZpckjX5ZA8U49aVwcE1UF2Jt7+CWG0po42rGWnZzlj/p5i69C9JXP0CRLshqRBwIE7VamCVGiSz/BlfhsH0nzFd5BrbyxspqjX892tIHPh+sgc249YWBQApMwAxvocBSGUAkKdrFb1M7Nlorh3bnUviwA4499swNWhuQjETC5AgvPgNAJGn8s5WRMSIEhMKCyWt1UIWHCrXrIJpGVw/Y2qymlTd7Ah+8gZMH+b+wU+QqN1MsuZF0vt34NSsw62pxN+/FqrXQfVGQ17NetyazWRqXiB14HmSB6tIHlqDW7sW74Co6m1bt2atud7bt2vN6j1qN+HUrCVzsIL08RLuHtjCaOMvwd0ztockQJgmyR0eRm+RNqkINrbwLPyh/g75TuVJVWjBqi3lJgWaI5BYxsWzCtrwpz42zGX2c7wNYwNw6yjZ83/B9On/xFjL5xg98c+41/ApHjR8jJGGv8FYw
wcZa3iNcVH9Bxk/9D1EG/4xsaM/Q6Ll10gP/BHO1Wq7tcLUdUjIaQ2KgolHdE+iQIOa33+WDtClpJ3N9SQ0fbPxpX7CAuQBXPwTotWfNP4R9atNQCG2rwRqt8C+DbB/PdSshZqqp2/3byaz60VczYMdWkaqeinj1R+Bi182m+RIu92Pxon6Lqr270xmbPqBKS3rEc3Kong3gISdFhhkOkErD2xVjRgJN246wfrwStWYAO8q7tgBJgd/gYn6DWTqCnD2b8WpfhF3/6YAHAJIJdSsgQMVeLUVZGvXk6l5idSBl0geXEvqUBlu7ZoAGCFQHm8Ftnei7G519nrky6TqqsicLOZhXQXjDZ+BK/tgUqvbNGJavz3CpDOM1k+LO73A/Jorj6jrHgHEbjFtfkyTqmLIwC80bWDs6yfDuROda8ZH32mFT3oSErfsjrfRdpg+Qvbqn+Fc+zL+tS/CtS/Atd+Ha79r6eofwd39JgeMBwMwph2SMvam9NshKAJA6Oc0vtKidjZZUcm5v3SuwZveCCkz/l7wXM4wXPg9YjUfNRVdLEAKie8vhroNVoDOAEMgeUravxktmzA+Wl0O1D5H4kAVbudPwugpM5OvqKI2/jGRaPGBOt8YQFlSbuI9AEQjpo40QFJ+ihxxrfO9h8MYWSWhqbNlQ0tAph7AZD0T5/8frh1+nujhlbgHF+PuX4e7bwf+vu341QJJJf6BEqgrwj9YhFe3Gq92A86BHWRrt5M9WIlzsNx+iNxyWgAAIABJREFUXleJ9wRyDqzmSeTWrMZTuLNmrVG1ybqNOKdKGT9cyOjBD+F1fR7GtB2b/CsFGSZJ8ZCoP4JWnRkp9wwcolOtpp2xWiy36QsxvbhnFpMaHAS40TgJINNmpl+bwqiCvNXWnjbpEYWJgEpTl6bJToEzYZfpOqopO4HvTpF1EmZDmWzaMRntM4drvPSjgeYSKFRjK8U0CcZNa1cizg0keszw8ULNFMpbA5jsFTjz6yRrP0hS0bRDFcRri+xc0EFN7lbBgSr82so5ETVKen0Z9m2DmuVwKIfsoVwmG14mPvAbJsUGd1ZIVzyuLcudJI6XfU8r8iNGus0ARPlKIzgMGzLLddQLAocy47Sr6EQP8b7Pc+Poa9xtzCd1eAkcXAZyGvfLnHoeb/8WozG8ukL8Q3kBQKrwajcFtM4C4z0ARBrmSSStRE2ZAWNszwYSBy1Aoo25jNRUMnXkH8GNg5AYMdJEjnMKpSOMkxBoxDx6vmd46XQTCg9MDRPnEEOK9JkJENif0s/ZteTKIrabKkSJMUXMGIBy6xUWNqFJreEIuU9t+F4caC9kri0MKJCmIVKuqOZfdJ7Bls4xzyetoXI/KvIQI8UkCSMs5DPpgnN76dLKhlYxa7PMOdAguj1T8Do5iN/zOdIHXzKpNn5dGfHaAkPGfzqwZk7ACAHFgXVwQDlvyu3Kx9+/FL9+KWM1hYwc/bv4V/8bZIZnKQBFZWM4fswsNdbYvNsrYkSg6SOpW0VfBJAHRnuYkpZCnVFNHkzfhPN/wp2Df4fhA4VkmwvIHsrHqyvGq8vHqy3Fq9mIe2Ajbl0x7sFc/Pp8/EPlJmTJgW1QVwEHpWLLg/dr8A8+mdzaCp5EBiC1miRcS6x6PbH6KtInS4keWcF47UpGa1+CPjltKlOpNAPVhRKDxNEaZpM79W499I7fiwMlOZTNaplY2lYRIpn+0i6PAhzKeQu9dHW4zpVWGwm09YiJss3YR8pk0DWFlpCkyeV5aofXjLaHU4DACxxRa9EZ5jSo1Y3rBFWdUXBC46qJvOngnuzl3wOPPLEHZK4pwqm0DpNmMwMQDy8zZdKO0m0/TKZ+G87+CqgtJXEw35B3sAxqy22gQcGGuVDdWji0CQ5uxtldQWpXHv6hXBIHcxmv28B002fhYYOZ95GNZarSe6rTr37ROAWq9YlPqF1u1aPGvLILi6yJpTmDQLJIa5g9z4fh0k4mjv4YD/dvJ3E4F5oED
qnIdVZLHMpDD+4cKsOpV0ivALe+HP+gkL4NDmyGulI4mA91AUgOVrwjQIxpJvPsbUiShAPrze/L6Y83VBI/WkyscQWx+kVM1FUwffKfw/B+8GQ6iv0yaJcpMYZjp7vfoXve7St1tFIVVSvLmlSKlivmod+ypL4ctyWQjFEVLJZWv2vQXG31NULWH8fxdJ1gIZRSVVRqXNmgIam0p8iohwwyxaQTQs2T9rWKzsNxHLOXeNrXdpcqxyRgKEQvsMjcnLm5Z9KgFiBBxUbFdmcAksVT5GzsKNMnvx+ncaPxQ2Vyi3mTh1aZ3DAOCiASmHMFSAXUr4H6dXj7NpLctcYIaw4Xkj5YxKiiWmd/C8aVOTBNwneY9jX6Fhi++PpdJETESixZw7bOklSmBtYkmylOrm18YsMwXM10288b2z7VsA4aC6FOam0HXs2Lxodw6nNxGueTPTKfbEMe2UOrceu24tdug9qNIMRLatQWQ22eBcrBclxFs55AzsHVPIkETmoEuk1kG9cSP1LK1KE84oeX4xxdQOpIIQ/qP0ri7Bcg8cDIApkk0pMaXLNB57t00DtDRKiQJpBmUB+K9F5kxH3wffDeJNAFzKnTgnXT0jTqcwk0Mbg2qElnYmTMfoCPrqfUEOEqBJ/RFvoj9CNVFE9FGHx5M3ESZvmrZmVmfFOrNnQ74a3q/Dm+LEDSpPwk2cCn0z2ZxVyJh/j3DzF65JM4R6qMteDVFBpwpBpW4R4shEMSls8AEAMwCdtSOLQDr+5F3AProaES6guJ7i8n3vSPyF79Y9zUJZR4orCvFLIBhgSQ1Q5P7IGIzaGRhLOTPDpZp5n13Or41Bjc3k+i++eZOPZxEkd34BxbS7ZhJXHlPx3YbgDi1FXh1OeRPTLPkgBysAq3bpsh/+BajMSQ31BbasBlNIkBSEUAkG9sLTjKA5A83kqrULsODm3AO1ZF8shqJmuKiB/Kh5OL8JpWcL9uPZMdPwsT/Qb1inzLsJEv4D1m2AfqVoB5K5lONGp2piPDQ+wH4jJBT9pEqyHU6u9gSwIdPDv5U1wUAEQ8rbQIs2RZZpmksOeazUMdXwaU9vASqWio/tnx0TjJjDNRGXn8Kd9qGhNREUDSZgY5hK05XvehC+iPbwJALJ95qJBENp0x2knmi5ZQuLHbcGcfIw0fgSMSjJX4e0vJ1JWQqi8kVZ9PtrHImN/+IQuSp20NPx1YhX+gAI6+CMc/aMx7o5GOFuDUrWKiYQuJgZ/FGzthxkQjpa4y5rUZUv1n1wBJAIVkgaP9QdwL4N03kkn9LItapL1nTOdPNOIP/hzRxh1m0so7+SLJk5t5cHIZd48uJN24Gk8MWl+J11CGd7jAkt7rs0PrLdUL1astqUPqywKqwGuowKtfPYe2DBoKoKEEt76KbP0WMod2kG1YT/bYQjInIsRPFDJxXFt3/RkkboIW9gdCdzKrFGhxipg5ULe2v6ykNe8FHKlSda3pUYMfKylDDa3P9b06LaTg+BBJT2hnO/LhIRZ04Y2Eg/Z4oqh+X8eb/3SoOXn2OXYBmO5CNHP8o0NmfWh/8an/12+q+0yKm2z7MaLcI+3fhvR1HrT/LtOHP2yiV1RvsNGmA9vM+ERPFDB9MpdsY4kZd/GG+OBpWuor4FCZvf6hqhleM583LsM9ugSvpYzx4x/GH/oSxIcgOYyTiptKPNFE0G3BpkrihTAlySwBIU3ELJF1Y6YCRMyz4FDI0Umrds8AXPk8tH836YNL4NBiOLKZ5PGtPGjK5cGp5WSOluI3rJk7HV6NP1dqLIYjK/AbC/AaNuIdegHn0Es4DRtxji0ne2IBiWPLiB/fQbrj5+DmPltWx1UMHKKeCg1I6UokBAAJOT/g70c5ajJ1JH5tp4aHGSZ9as76DjlBYAtki8cUSYZJowW3Kit0nJHWf0288RU4pIRJpZVshwPbcQ5vYPJUIZPNeZZ/5jj+NJRDQymmrV+L3yBSmk4x/tHleMeW4p0sJN30A
qn2H4crfwqxQcjGSacgqjVVZjwVvbVmaTj/p4CKQBIxYy7fL6mCW5YFHH8YEtUw/pskWz9K9nAZ1ETMRIx1qDYYcyZ1fA1O4zOAQw9jOqcc//AcSB0RAqR+qwWIwCGt1liKc6SYeH0BiRObud/wYaZO/yKkZGrFSCWSJnPK5mfJeQ0UxGMiV3+IA2ymqhWXdnLNdux3CKPP9TFkqpj6COqTSZL+/UCI3CF2/Q8Ybf00ySNbLUBq1oGobi1OYznRk8VMNRWSlYCdy9gfLofDxXA4DznlHLIpTv7hUrwjefhH8gwPZA9vxm1+jQdHNnO/9R+Zbau1dimbcEwdcw28xlK+t5YxK/iufwqLi8x6EBN5kWUQ6uJUL/7Ir+Fe/hST9ZW4tQVQuxRqta5Ys+Mb4dB6aNxgESsmD5n9ads5do7pVAOQPPzGQryGDTgNmw1gDUAa1uEcXke8roTEkUruH67iYcvfgzt/AdnL4GgpsawMJRfK1LKZy+E8hkWMNIY0jAASgkQdJdH51y8xlqpqJh2bnJlOaxJTxfmGGOn/UUabt5I5oiiTQvqiUuM8e0cKiZ8oJnailOyROQjGkGdkQTSugsY8oz28hnV4jeV44gsB5fBa3MPbcI5v537jSm4c3Ub6whdtpRhV59dgy4IOMtY1stYHF1gUrPKIRLlPQkvtLYxAD/mwlvTQDzPduo2E0tYP7oD656FuG+zfDvuVyr4eDlaCNMiRCvy5tjr3SNmcicYyOFKCf6QE92gJ7pE1uI3r8Ru24TdsIXmwlOThIlKtFUy2v8h474/C2C6zTUJoVdn5ChsMmq1AbM0s63Q+8s6kUWRu6ci/2kDRVECcaSY91X1Ut2gtfBRGD/Kg5UNMnVqFe7QIDhdBQyE05MORlfjHckkfLzOJpe4zjL/GnSOFcCQXjq3AP1qIf3ijGXsatsDh9XBsDenGQmInyhk9uZV77Z+G+18CX0JSmylZB01NCBALEssPkYdcYJrbJlpialqNNeFc+vdEOz7CZGMp2cMb4ejLcOQDcPAFqN0OhzbbUJom/BpD5g4Z/Wnb8Py5tBVwuMoA1D2+EufECpxjZThH1hvJIYBogU5aczany0h2r+P28RdJX/lVu5FmQuvtbUc8Aomteh7+bftPjrK80TCEIa2iCNU3IV/821gRaeJ1iklGmcTsMSQOmxyAi/+esRNVJE8shKO50JgPjSuNv2gY+ViuHaejFbhH5zLu4TkCSBEcXYl/IgfxgNu4EbfhRfz6HdCwEY7bbA+/ZS2pjm3cPL6O6NA/hWSdWQVrXIxZ4XaBwwQFA20SGeE8ca7hch9nopn4lV9lsvtjTDVVkjieS+p4Hl5TKeljVcTq15I6vBZOrYNTRbhHlsOxYjhaCkfL59jq3DnSkUo4vBWOrMM7uRS3aRHuiSLco2vxjmyynzeU4B/Nh5Z8Uq0FPDhRznjH98L1P4eoainZ2sICgt3yTHudaDsHK1FCoEhaKnxpTS2p578GiGbRJ5hmTCaqOEuK9eGfE2/7hNEQcpQ5mmfp+HI4sRKOFbwNz8xx/MV75nqr8E6swj1RgHusEq9RAvwFOLwJGpfhH1sEp8rJNK/l4ckiRjtewb/xKyYbnawmZAMtEqxAFEBCikS5RUZrE9whssP/i8nuTzF2tJh4YwE0lxA/lkPs5BLGGldy9+AqJo8V4Xeuga5csicW4R2f9cAhWN5rO1dghOeFAGncYDuoaRFOkzRJEd7R1SBqLIKThWQOLyJ+dAmp1jU8PPYSMc2NxNpmOkeTXjaLQLPOsRl1GwJFALKhP4FExQCCGXGZpn9FX7LTJ0iaYnFW7I7BvX/DpMyeYyU2BG8kfAHeiZW4J1fhHRMYVsORUjgSCte5AqQIjq2C46vwjhfhHi+1dGw13pEN0LgWrYOnKRfv6ApSR1eRbClnpKmKye6PwvgfgDKOBRAZA8EiwVCLCCSRTDZqioulbuxntOVzJJtfhpMlODUqCFBEtG4BY
4efY+T4Ukab8phoKSTenofTtRg6l+E15eKfKMY/XvK2LSdKzfXettV3z0LHK+HIFji6BY5X4jcV4LUuNERTnr22BuJoARxfin8yl8zJKmInX2Sy6XsYPqnatNfMwiY/JSfclkBS/ELLUdVRIoHEmlryOYI5Eb0VfYcDJJlM4rrGw5gRA0plUbE59Uk438h0FG7W4PR+F7GGCN6BFXB0HWgW/dhqOzanivCPr4Nj66zFIRAdfxYeKIETeXCyAP9EFd7JKmPteKfyMON/UsK7HATKk0vh1EqyJzcRP7WFyY4S7rV/GGInbE2xVAIv7ZqJWo23lOG0r1ysWILszS7GOn6V0caPkT2sybw8qF8LTdugZQ20l5LtLCPRUUa8o5hE6yrSzYtwTi0OAFJowXHiG1uBjZOS4m/X6rOA3glIOubtvj8hLbEBjm6CYxvwmkpw2ubjtEfwmxcbyYGcRCPFVhnV7h3bSPbEVuLN6xhp+Qjc/e8Q7wHtXJuJ4SY8m2wYzLiGGkSmlmETASLAyaPZtxne+Y57k1UF+VkvgSOVSpHJZMxkm9GsWpE60YV76TdJd1bhHo9Ag6JLSknaEoxNqVmK4J3YYAEipj1WBCeeYfwNXxSYa4QA0XIHp2UlfstCaM6xADy22gLpRAH+8U2kT1UR75rHWOdquP3FIMviAXhR9LwSjgKIwB9h/C7x7v/O2PHvMrOOnFA4Ls+mghypgJPr4dQGaN0E7evw2yugvQRayuCUkFlkpIPfNJdW54RUErx/760BzbEqK6mOS3qU4LTm4LRF8Noi0LwQjss5FEgqbMqDgHSykmzLSqbaS7nb8Qmc2/8Zkn2QiJuisr7WUwWJd9bcUgasXU6qyKARnValfMdrkFnYMFojnU4jraLWJHuqH5K38O/9IdG+v028qQhOSVrLzKqAxu2g9KSmSpymciPlpe2NVDcAeYbxF0CkgY6X4mvsT5WSPbXaZJmLB/zWCDTJTy4H45NughOr8ZpXku6MMNWZS7Tvx+HO18HrM9u5+dkJ4umMqYov6yHC6HmiLb/JyPHV0LMYWjT5kgtHFkDjAlAlioPFpOvLSB8pIXN8hbH3OVYGRyugqdR2hjpkTlQMp+ZI+u3ja+DYWjhRgd9cjNu6Cqd9MW5HBNrmQdMSOFYIjZq3kaapNOrXb1tAqnMxd04UMNb1d+HG/4DoDSs2BBAzy6p1G9q9V6T3wfJSM2nyV8PECgGi7dlkakmDhCaWieIlJ01KefLSZxlrryB6LA9OFUCr+KECjm6DYy/gN63DbS6DpjI4KbOq2FLTHMdePKPxP1Fh6VQxbnMxmZZSMq35OO0L8NvnQctiOKmMi+ct6bdbF+F2Rkh0LmOi9WOkz/0bmHzd1ghQqkx6ykasDUCifcT6/h13W9cy0TmPZPNyaFkCp8VgEWhZhH8qzyDUbcrHPbUQ9+RCM/egORBOlUFLwZOpOR/eieYEqgCM6tzjZdb8kt3ZnIfTIhVbGHRQBP/UAjiuUKMiGpIgCj4shrbncDqeI9a8gPGmtaS7fgzu7oLkHe1nYBas+07UOO42/UCTR3Yy0ZhYf0V8kLCwtVqB5LGXKqmPt8Dl32SyZzvjrYtJyKIwTFgIHRqbdXBiI37TGiPANEacWgUnCi3jPuv4Gx9WwKvAbS4l07aCdMcy0vKTW/OgdbH9vaObQdSkiOYi/LbnSLevINYmf+Rv41xWTed9wEV8Z8xsnaDxjpC+Cnd3M33lJ5m88Alig99Dov9lol1FjJzIIdO5AbdtB27LDvzW9fgdi6H9OWjRepBiKykkLZ5E7wQefddcNHc6Jd9GpOvkGpC6LavJtpbjti/Ha8+xdujJXDhaaemkgCSALIT2+dC6ALelkHTLh/H6fgZG3gDnnNlsJjl536TFS3M8CveGVpU+lX0upHznvkKN8VZH3Txx5oZZJ+/2fpqp9nyi7QtJN5eQPVlmwdBRbiKhSHM0yyzPh9YlVgCfyoemkrmPvfjml
PwXgbAcmlbjtZSQ6VxKunM5mbYysq1l0Cq+yIWT5ZbEJ61LoXUlbls+XncZE80bmOz6Xrj/e+B2KlZt19QYJ13FAlLjuNNncSaPQ3QPzv0vMXH+3zLe86+Y6vgcsbafJtn8z0m3fJxM1woy3REyHRGy0jDtK6BVKvUJpE55Ij0jQKSZZO/KzhRYTpXjtpTbzmkrwW0rwD+10n5/cjmcXAUGIOqkJQFp0EqMepY0yV74LEx+GZIDZKeUOvFoD0aBRGkpZnvrmXV8Asp37is0q6RBwpc+m5qaInG/iWj3D+C0VxqJnWpbbswbmTlO8xoj0WlbDm3q65XQovcSSvOtQNOYPZOAVPBGjC8AFuC15ZLpyCfZWUC6XQCRv1EZgFPAXGjvwQjmcgMoTq8g1Z7PWPsHmTr3L2F8f1CIW7V9IaI1I1pPnFbRQVMoSTVWr4FzEbyLMNIM9w7BzTfg8m+RufgPiJ17kYnezYx3bCTd+jJOy6s4LS/itm7DbduI37YaX458Rz62g4TYgNRJLeqs3MD0eitIZLIV4bWtxGtbjteabx7EPuhqa8pJKrQKeJIOS+CUACJptBq3tYxsW4mRHnrvyR4WSKQ1TikmLpNPYNY9rIQuResK8ZoWku5YQqy3itjQ98OD/2krhodhLNXaDXK3lOCoORNVC58Jos947gJMSKEdFrLWW9vw+0fM99YjvvFvHTv7+sERb7F+vvG8Wcfp2JCMFgwnPYPU/mBdhCZHwyW11gOTxozhJm8yMdzC5MUv8PDoFtzWZdBZQKYtj3RrLm5HCdmWSpInCvDalwSaejm0CiALLZnx19iHJLBIy4iK8FpFBUbKu225hvnFC9ac17EKEgXgMD7PcmM1CCDpjiIyhgfK8Zqr7LHmd2X5iBdK4FQVXrP4NA+vp5ypzud50PV9pG79PtBrsibcdErp7rM6y3SyOkFBrqQtja9dUc1S0HG0nx3eADgdkGmD1Ammu3+HVOevkez4HIn2Hyba/kmibS8Saysn0bYMvzcHTs+DLmkbRZbmwcn5cCwHjkj654LUrbRBSwlKCXAUVj49n2xPhGRbDpm2cpyWl8k0bSPVtIx0i663AnpLwNiZYvhi/NZSQ15bMV6bfe+3hWZgqOH0d/CZAhIKNhzXdZZC53y8vgWk+0pJnP4E453K/r0KsYytjKCZI8OeWr6kPKQkqewkjjONp6WyBjBxfD+K40Uxc0yGmR9lAAdsOhMrDhfoaKJy9lA89kf4hTHnwvFREFKMHVxKuAmPe1Ib4nF2kCGtPdqDfdqVm5SeJp0cIZoeYcofNxOBYyZbz8wMmN3DmK4hdeHfMNr0UTJtimzm4bcJDCEVojHw2wrx21fhy8poywtoFbSJgvFoXmV8RxMRba7Ab60w45/pLCTVvYrk6aUkexaTOr2ITPdSnI5VZmy9lg34LeusCdW2xJjTbscSAxIDKAnW1gBIBnShQC2E5lJoroDmdbintnGvbjX3jlYy2vMhRs78E+LDvwsZ1TEYeRuAPKFzTeKjQp8qjZX2SKdc/MQUZJWuMQSJdhivgTtfxr/6a2Qv/ATZc/+QB03PM9a0jqlTFSSay0g3y4kuNSFlMTRdq/E6y030IXGqmERTKanWPLI9C/AGFpLpWoXftQE6PoDb+hLptkJSbYtwupbC6VVmYASCuRD6fWO/llrV27nQBid6l+L2rCfd9Sm48QV4cBQS9yFjCiqZ6Q8rQlwyaKGQdiHRbubSKAk8L2FLkqrqvUkPDpg3QIedtbf1qbRG3G4oGWiRsP9DZp7dmu/0gUAi1faWRV76Ssc8FkV4y0rJ8HohSJJBKSEJQbN92SiOPxqUBkrywFHdSNcsozXrPKL7cK/8IomeD5HUOLYXBmNQ8NStAY20T+uKwDKwgi3bUUyqq4Dk6RUGHMmeBaR6FpDpXozTucIAT/NzApPAkO1YOousJnHb5WMoB68YTpdCt3itFDpk2ZSYuT3a1kLXazjN20l2ljPds
5oH3duZOP9ZmPg6+L08vj/II/H2De9kXphCG9peN+uS0QaJaS3JUpEBbUg/aXdl1TpqLdNNaOuvq7ao2d1quPUVvKufJzX0fxEd/D7G+j7CWM8Oome2MN27jrG29Yw2lTHZstSEX+nLh4FKaFday0Y4vRm6NuO0bTKZuYnOJWROL8KXGddeNDdqk6ottoMjqdaeCx1F0FmB172WdO9Wov0fxLn+4zD9v8A7axfOyMIxeTvSIirZM2FaUw5ADKjvQ69erfg5OEcdqzRqFQ5QqoYFR3hAwMxvZeKQmdXqOuHfBhCBRjG5YdIooU04u9Uxb0M6X1pR92h8qmEy3CLBXVOlS1eQTDC/qe2nJ0+QvfILTHa/xlR7LunORfid6rP8OZLOlXmWAxJOXTn4nctxO3NxO/Mtda00n9G5HDpWgpjemNgy11YabZPqqCDVWUqqU8DKM0I1073MaJx01xIe0VLrQ3etItOVZzQe/VXQVwI9EdLdEaY7lzLR/Tzu5Z+H+J+/O0AEjJBCgGgPakfFBVzHbHWVcFLGoTECVuMQMofpYY2N6uA8hPQgJI5A9HX86S/jTf4W0Zs/TvTqP2bq/A8x2fdJpk9vJdFdQbZD4FhrHT1FyLoK8bvKcNu3km7fQrxbEmaZ6UTaCwOAzKHVtY26z8NeZzV0VOJ1VZDpLWaqbwWT59YRv/YDMPoFSPWCmEUZJ1qVSNqYIZOoBI9vhIhhKPVByKN6L0YTQwZvw69kYs0s6Q21TQgQtSEYZrch0+ozk1EcACNYjz5jIpu5YJliYUJIYDrPmNCPcmm0Pj/JQ6a4yzSjpsCBWQKh8Uw8hFvVpPp/gWjnh4h2FpLuysE5LYZeOUdw5Btw6Xy/axm+wBEAxABOQk8CrK3IRkhbQodeJpL8xxzr/LeXmCCB07Eap6MCp7MEp6sQpyufbHcusZYc4go/ty0h2b6UlELAinLJMjGgWwYdEdxmmfMR3IEI8Z4iJrv+Jt7Fn353gMzUGnjrxlOqwOFbASTrW6TiZSFpd1UzZZ/FgEemmeeEeymo5pO83jH8dDskD8LUXnjwP+Dar+Ke/Qzprg+Sbi0j0zwfpyVCtjWC07aEbMdqMt2bSfZUkTxdjttVbBxEOiX5C3jqVuAyWiiUgrpeMV5XAU7PCtKDOUwN5jAyWMjEhe8iO/xvYbIBEhNGEIj39cyBR2Aqk5jl66GgCJk5ZPqA58OPH/Mhws42TC/zaRbjP/Ze380qHxSCT79hflzw0x3J5HvkG1ng6MZ0QmCO6TIKZHqPjtaZGipzmPacvPtnuAN/n2RrCamWZZaxThfA6aDPu/JgDmT6uLsUp6sct7PcmNq0r4Y2BWPWQnOQOd6kQMoaE8o1Gl9zKc2Knsq3CfwZ+Zptpfbc1rXQKv8kIJlSum57CX5HIZ78mI4VZsrCO6G5sgiJpgjxtgjZMxGifUsZ73wJZ+gH3h0g0haimbELNEooCbWb6Zg/kwQeBD89s/zd1uNQ7SbPTN0nHEhqP/UEJGKgTUgd1XkyVUDugXcJEi0w8jrc/E9w9V8YU8fpXkCyPUKiPUK6vYRM90bSfevJ9K3F7Q6BIXA8JWlQu1daEsA6iq0W6cjF61xKtlt+0HxSg88xPfgco70FjPZ+F6kLKox9GOIjNgYc+g1GSfhknCzxRIppVZcPlnTObi13iptnuQvSBobpW/u1AAAgAElEQVTBw0iSmFwSX+wqCqW/PhdApHmCc2bODZjecLaAIAqON5GpxwMBOl3Va6TgtUZf1T5mLC79XvYc2cu/Ded/EPrXQvcC6BDlQFe+te3nAIwQTJ7ModMFZE4X48gX7VDe3xpoXWNzAAWQpg3QpOUVa6BZ5rAm/wSMFdAus2uJpfaV0C5zW2NYZq/TVgkdG6FjA7Svx2+rxGkuI3WqhGRTEfGTinZuxu3cQra3nNRgKfGBKsb7XyN16Sdg/D+/O0A0UaS4dzijOns2VR0cDqM6Vqu8tVN2nAfEuE2Cm
6S4Tdps7jhhlLhklzHZxAeu3Wdc+1QYZ1bF0hRvFnomJmCyBy7+PAy9QmZgAfFuAaSMbNdG3L5S/L58/O58mCudXgH986FP6l3Om6RMkYm6eJ0RXEXe+nLw+paT6VtJ7HQhk53rifV+DIZ+Dq5+Be6dhrG7kBizRd/MRpEJprwkDzOquWs1jPJ6JArUZ9Zx12KtoPxpaD4ZgIQH6Q91jCgAkzk3+D441jr5ykLW/Iwt56/fMBQIs3AjJNO+xWrzFVTw0wYoRvkId6lpstNv4D78Wcb7N+D0F0LfGji9Gjplii4BZXP3LA8ETC50Pz35PcvIDkQMub0r8but9raCShpB0UUBpgI6ZGYvg57n8PsieL05NkLasxB65sPpoO3VeNoxpW8x9OVCfwl+/zr8/m3Q9xp+/3dB/8dxej8DF34Xd+jLcPO/woMvkb31X0nf/EtTYhfn3nsHSAgSI/Vm/acCcyILHMFFal2p45pPuYNnViuqHcFjHOkWVQT0Xc9YWcbq0vyC4QchJli4p+IYIknqG79AamgLsf4ckt0FuKfL8HuKoEca4OkHJjxHA8Tgc/j98w3QvM5S/I4AIB3zcZXL02s1TLYzD6e7FG9gLe7AJjI9zxNt+YTdj+LqmzB2DNyztqIHWnSl1XYm99HssiugCCTCgl0LHdQoDRjdcrTtWBvynV3uZ5Yror4K+svCR9saq2qkStbY2YoQVo8BIzxvFkCM/2PGSoWwo5CagOlrZO/tYfziP+Nezxrigwtx+iK2n00EqBDkd3QrfC9aMecx8HtW4PblkO1fhNe7BL9nCfSE11WrlKcV+KdzcXsKSfWWEOtbw1R/FRP9m5ns20ayZysp0entJHu2E+/ZQaz3RaZ7X2Gq50Okzv0d0kM/gHv5x+DmL8Dd34SRL8DEV2ByP4xfIHvnEkQvgn/Njp+CTupETRTO4vU5vBUyZMTOIv09Q2FUJTQVQps4XLoqyRdIOyP9BLBAaEolhYffbyZ55WeZPqsiYItwJSVkTyoPTCZWz6q5Ue9y/L7F+P05RlPQuxxkW0ubGCqC7lXQLSCWQPdq6KrE75FEkgRbSeJUFdnOT8LQ5+D+VyDeaje1N7UxrPYI/TOjZV3XhMiduEs2ljUVFNPOFBlPJULF6KpwKVJwVaVoRCqXak2gpI8xV5Ne0JpAlN0YT6asjpMuMUVrfG2HbPfsMAWmFa71M2Qczd/ESKfDQndX7bzWw68Qv/gjTPR+gMmuzcS7V5s5oWz/KpyBHDzzzHm2j06vhNPqrzn2vcasu5xsy3YTmfT7lsPZCJyP4A3KUY4w2RkhdSFC8sJSYhe3Ebv4aaYv/Sumr/4Hpm7+B6K3fpvYtf9C4voXyd7+U3iwEyZk+nZCesguqXVGzF4hpjq+CnVoBaFK+GjORwWFJbFMirYV7D7jQSEP64c9I0AEs3BCUVPy+kFbKcIgcLbpICfG2MQCi25GIAm26TXhTlUWETPYpEBpFqPydfhkGu/OHuKX/qlxmJNKpOyQk7jeDpak/BzJ712FGRxjaklNSzqWQKfs10roEkgEkIKABMjlMBCx1CtToxS371WcMz+Ie+XX4OHXINkO/k18Rs0KxaSXJOlmTd1ps/4o0BxZL0nW1NC1GyGEhctsETNVXFHFS81oPwqKGbAEGkmgC0ldpe8eeR2BSacKHdkkmeQkWWkJV7pNY3AP6IP4Trw7nyd1/h+QHqjC61sJfUUwUIHTU066v4jk2RxSZxeSHViO17fKAuNZwBEAhK4PQOfL+D0FeAPzcM5FSJ2PED0zj9GBFSRvvED85qeJ3/4lUvf/B9mxo/iJPvDOABfsgjd3GNwR+1wh0wcaYCaiqh0lAtktv8vUT9beQmHcyFS/j5tKxyr7E/Zx5Bsmld46yfSOf+suQi9Erf7WyIcOp1UP8jmkKozvEah4fWTUh0BkTpF5oOrrsrJkuz8we1joigZX02M4t/6EyfNVTPVG8HsXwflSAwwx+JxI4
JA2kLlm7NWF0KsJSGmRSujURFJpICVzQJqrV8eEtNisrNQMb6p7Gan+IjLnN+Fc+wT+7Z/CHf7PkD4JqSugOSP5zDJJg6Wd5tlCFaruMCnlvgmfy9FPOtrfS7P2tuC2ClUn3/Jv0plmyomZIngJ3xZTNf2tPhUJWWHgylTxiEL6ISRuQ6KD2J3PE7v+A8TOvkqmrwp61tgMhe4VZl5C/ldqYBWxMwXEzq4ieXYx2QEJBYEkz7TSpHMhc43+EhhcA4MbcPrKiPYuY2KwgOjlV0ne+imI1sPEdZiSbyrhK6tDGla1q4JAhfrQc021+2wmTiodJZGeJpqaCjYLst0gDn0raQysHBf/2cqKj8xVlR41vRj25tO2spPtTHCYKhEqDdMGdm94E2pnkwFJeEJQh0gAiZJkmltEGTaAMbeoJxs5TfL6DzN1tojU4AI4vwA0WP3LoF8O91O2koS9hdBTDL35oL/NZ7n2MzMLq+/0uX7nORiYB/0LoHeFOTbVtYBk7zwy/RGcs/Pwzi3COZtHZnAj8b6Pw93fgIf/G6ZaIH0zcOS1eUvW1OD1ZDNJBejBxcxhZwVDoWohUspWfEibqD69FnBpjYpMMn1jB9lEGnWNUEmbCIFCh5q3eQDuZUi2wsguYtf+iPGhX+Th4HczMbiGeO9yXK0HklaQ+dS1yKTe+P0ryAzkkzhTSvxsMalzS3DOLLL9HfbXXNv+JXjdETi3DC5WkT2zgdHTVYye+QjZe78M6Xq7kWKgIhXpVoFvdVnMc4m5jonAKZ9Q2M9mfLPSMZNNkHaiyHQN657ZfrLWibpodldbGaUIn0o8CYA2amgrKz4DQGzKxONMbwAgtRXSrJvRoiO7tkKFEe6ZTTWN3hNIBKYA4eoPYVll/RNmB4yslYKxEbMpZOrazzBxMY/xM2LK56B/SQCOubSLoE+OpsKYMtnKLEgGnoPBSHBtgUHREIFQQFF0ZAX+wDK8C4twLi4ge24h7uCSQJLmQW8xfs8asn1byJ79CM7lz+Ld/XUY/7pNy3HugqJ2GqnZYi3U7eFnihQnbAVDL+0bU0m5X9o2IasJSwFJnR5eR+CwHWjb2ChM9cCD1+H2b5C69hmmL36IyaF1TA+VkDxbQuZMAU7vIrPKLq3VmFps1rsAzhZBr8zHMpz+YgMUZyAXd2AZDEg4LXjUP6Zv1D9PQQOLyZ6O4A5GyAxFSFwoIXrxk2Tu/DokjoP/wFTz1opeJ4UpF6qa0Yp6ahMgU+ZUyHgrxxsmnFENto8MCtRZYehbroFsrVC6BBZN+L2ZdFXp0W8FQGZpCv1+KOGsWpR61IYuD5BDZMwsHSB//y1jLR4xm904U2aDeFIqqHoPRvYyeePT3DuziszZlTCwdI60BAYWghzEnnLoqYRe2d5LrcMop3FwCd5gLt5AkSEGVsLgUhhcjndmCdmhRTiXFsOlfLhQCufLYKDISGLNEDunl5DoXs5EVyljp19kcvAfkrr6y3D/yza12u8B9xw4lyGj4toPQflRiRTE3EfgERNkpU60h7N8Ny17jEImjukXTS5pdZ9209I+h8mzkFQm9p/DzX9n0vhj/a8x1VNMtH8B6XMRGFK4VH2QB2dXwJnnyPZFSPdE8Pol2eXbCSRltu0rAFH/SnMsZ2z/mD5RvzwtDS6BcxHivRHGTkeIDr0AD38dsifBVdjcagWlnaeUca6ijYGbq02BTJhTfaF+MTRr7/JQwDwGFplockQUPAoA8tj3gVkq4ATHRd5G/s9i7/DsJ7UhGnU3eh8au0Jq8LL6y3L/W9+H0k+XD7SIjb9oNyYb01fJRy+TxpeZoH23tVdf7AqJe/uYvvxLJBXbVkfPhfqXW80gp3Qgx4R8zaCfmR8AYyXeQB7uQBHuQImlwQLcMyvxzi7EPbuQZJ9MkGI4VwEXVsOFcjhTjNujXKXlMFiC119CuqeIeHehSWNI9pfinFtL8uJ2Jm/8LcZv/xDT93+S9Oj/B9NfhMROSJ+AT
D8412xCaHYEU9hPrcwlkXcrAFcbJPeTnfxjEg9+hanhzzJ57buZurKVyaFyEufzyJ5bhX9mFQzkQ48icuUm58wsk+4shDOFcGkV/pUcspeso5wxgQgFLpbbYEWnzivARPsG50PQTwwoaDEHOpODfz5CvC+HidOvkLn4CzDRaKteah2OapYZ5ah6AFpeoK0lFPtXHyjJUoEGmUWWFMOTESVBrKkEV6onVABv5T39Lf4LNUjY6jOT8mN5O9Aguo2QW5+21U1Ir4tC2M4Cy+z8ovAm1M7GXPi5btpcR4FRXctmz5pGE4jZuO21dJLM5D0y9w8x1vd9JAfXkRkswT2z3A7aGZlHOVYTDIpJl+KdEc0C0oDMseXQK6koiRh5pDXOKtS43IJDgAgoa9o88zvuufkGIJwXcFS1cRVRrSfpzCGl+ZUzBTBURqYnD29A70vgSjFcWW4kt8yK6b4Ik5dzGL2Sz/iVtUxce4mpqx8nevUHiV/5KWKXfxnvzn/Bu/0l3Nt/gnvrT8ne/jMyt//MhDWzw3/M9LXfYPL6zzNx7UcYu/JJRi5t4eHFlYwMqdxAhNGBCNGBiIkOcUFh1FwYkLbcAF3roXuNiQhmOpeQ6FtI9sIS3GuL8a9H4FLE9ouWLGi+qUuRw1zokwZdaISE6degj42Qmq3N1e/qx8GVVgsP6rdX2n7vk+ZaTrwvzwQI0hc/B7e/DmMXIWHNJpM5bgAi01xb1N0K9pdX1oXdnFOTnOH+KWFrLRWBJvSPQ/4OzKgQLGL1t5LhQZkzOkfFq81LR+k1l1bnhNz+1l8L7CZdOryp2a0On/23uQd7YzN3Hn4fPokBXPgAw8Qe/jfunP8h7g++SOJSMdmhYGCv5sKFIvzBfOM0Z8+tIHt+Kd65pXBGJlng0A/KtJBJtQDOzTetf24R/tml+GeXPUbezN9Lsccstt+fWYEvmvl+1nmPfW7Ps781D//8PJyh+WSHFpAdWkxmaBmZ83lkzhaTOVNBZnAdqcGNpAY3kxrcSmpgu6HkYNhuJXlmM/Gz60meW0PyfCmpoVVkhhaTvTCf7IUIzoUI3lDESGr7fIvs8+q5B6VRVoHpg2Wmb7xzi3HOL8I7vwDf9IeOXwyDmlQNyPTXYnO8NJP61j+72PbhwCIQnVEfLsYZWk7qfB6pc6U4Z8vgrLSVwsjSTCXcaf0oYxd/Bab2WG2onXy1D6MsoWCLd+tiaApAQQlpEjnRllcfTarKyVbQSK2lGR6a4WvDYI/+C3krbB99M4OcZ5wHeeyK78Mfk2TSzTy8818YufpZoldfJHrWJpxxYQWcL8Y7U0j2bKFhnNTQMpzzGkgNYuC3nBWTv090TqCcZ+xw2eJzITGxf07MPFeyjDyXPpDAyJ7LI3s23wgII2gGF4EijGd1P4vInFtC4twK4mfzyZ4rsZpUWqx/vqm1Nn7mXzI9/HVIdmH3qlG6keVPhaslLi1ANBckTaKoXbBNYCBfv5WM920OEEmSGzjJg2Qe/g7pW59lerCS6e75NtJyvgD3bBHOuRLSQ/lkLyzHHVpow8NnckB0LgfOv1+ke3nOzB7PrdW5CnfrOnOlZ3j2czJp7YSiWbszKA2x/LE+VUg4O5hDdnAhvo6/WAgX8412muqvxB/5HfyYIlZK9VAoOmXTxGUkBJFNa5/IDxE47D6Nssol+L/Vr29zgMjXUSRMdmsjjH2BzOVPm5VhSUWmhlbinCsykit7vojs0ErcC2IoSevn4OxCOCfGegYmeaZz9du6HzH6XCiYC5ozOJ7x2c/Jj5A2CEAyWIF7Lh/3/DLcoRz8oQX4Z+bBWaslvbPPkT6zgtT5CjJXXiB54zPgVQPnbA6UP24mVJ1k1gaRAh9aGiPc0MZuSRGA4/8HhHybA0TLSpQHFofMXRvWfPgfyVz4W8QHC3AuLsW5UIBzvtSQO7QKLs7DOqvz4Px8OL8ILryPJI32rHRhIcyZnuHZBUwBQNGsc4vh/AqcoWIyFwpxL
y6Ciza3yrSXrB80PbiUiaEPkb3972HyEDAUVBEZA2cSJxkjGU+YZd2O6xt/QuB45HAHfu3b+g3ffH3ybQ0Q9VEyyBq3AbQHkKiD4Z8mfqGM9JX5ZC6uMoMmkLhDeXbQNHBy5gUQMef7CpBFAUDm2M4ZGCGongEgQ4H/pPki9eeF+TgXVpG5mGcBoiiY+vpqBK5F8C+tJHbhg8Su/zKMd9mJUi3ZVl6eF8XNREmnEqb2bzqbQbvnWsfbAsR6JIFtFcaDvsVa5NseIJkw3V7uiJYxpgfgweeJXqkgcT1C5tJynKFC3POleOdnA2TeI8n9zEwWMtvTtrOYc2ixBerTtu/bvetZA2AYcFgweJciuJfmmwwDo60FjqsREzb2br8Gd34fJs7a1BpZyCYY5Zg9610nY0ubellcX4ZVOF0QtoH20DlyQtT+NUCerDbVN4pwKK0lE7VFOUwh5YdfYPL6ahK3IqQvLzKawztXDAKIBtVoEJk2YsqnZepv9vHSHIvnRkbzfbPv52muJ60R9Kf69DGah3dpHt61CPGLS5m8uJXM8Ocg3mUHLVwVocltOeOhoDM8H4ZsBQyFtNTOWkH51wB5MihmfxMCRGsjTDxQc5XOBKS/xtjtSkavRshcXYB3cSWcL7STZBpEqf4LS/AvrMC/mIN/acH7RIvs719cgj8n0r0vep/uXX22CG4sw72cg2f8uQVwcQlcXoh/KULsfITJC0VMX/t7ZLQvYKzT1uCSz+hlTI0CZTYrSjV7rtgu5HoLOKQuNOAijXdIsxniW/D+29zEUjzcIe27NpVewsZVbtJeRu+s5+H1COnr8+CSIlrSHsvh0jy4PM/Yw97lVfiXF5sB1aC+L3Rpsb2HS0ufukU5YJdz3p/7vrwQ70oO8cs5pK6thGv5cKUALmqCdjnOhRyiQ2vI3v8XpB9+hezEOUgZO9gWkvAn8bLKywusgEDGWQyEk8WB5ngrOEKQqP0Wv77NAaKYuGZYlbvl4GeVyKe850OM3N3G6M0QIDKllsIFMVMErszDubyK7JU8vCuL8a/Mf59oocl9EqMJqE/bKm/Kv6JrPAvN/dmz1+YzdT1CbHgp7s0KuFJhtfKFUvwrH8e9qYrpbZAew/fs/IWiUWaxljuFrw2LvgEgdgnFjGllEfNIY4RFML7FwAgv/20NEClnVTPMKn6ueirelF0qljnF6L0PMHYrggaRSznW11CI93IE/6qK2q8kbQAiJps7kzzrud5VK4kFjqemq88CDJ37bM+tvo3fWcz0neVMX80jeqGY5MX1cOP7YOSLEDsNWWUdWjNK/qIAYUBiFnBoCfAjDTLznQ1JfqPfEVhZOk6LpRT+tcgJ2fmb336bA8TD99K4qupoikQoy1MiqZ+Je59i4lYOzjXZxEpHj1jtEURU0tdXkL6+yiTmce053he6ugD/2iLcOZLO5apowbPRHJ9ffZu6XcbEjXxGrkcYv1lK+s6/gIk3ID5uI1WBby1RpjWAqqWmajZmnLQc4zGAyDmXnSxnUl58wP86XuCYmVlXzq6WI2vlvQb8W/f69gaIOtBRRpvWWA/bNdYSKpnrTN39DNO3i3CursLY6gLIFRuPd29ESN1YQuLmCsOc7ws4rj2Hb2gB7vUFFiRzaN8vcKjP3KsrSF5ea9Lqo3eex5n6p5B5E3zt92gDUI7WcHguGT9lSNEqJSEaVaLIVYAVtdb8CjPDNQEcHKcvtRRmJjdLK/VjM6b1tw4ez1zV5Ft5a+/h2gKIBI4q0JlU6LvBwDwgevcnid3egHe1CC4vseBQTP5GBO9WhMRwDonbS3FvLAQ58u8LzYfr8/FuiBbOoV0I156Rruse5vj8V3Nxr+yAG/8ERv4Akk1mrYakuoYl5vlmWWzKyZr14mZfu0BDSFPITJL8F+m91gLNFMkzi5ZmAWSWtlFJpYTZy16ZvZKI37rXt78Gyfi2IIJZTKNUaYFmgqm7v8T07VfIXiuxTBTM5gog7q0IyeH5h
rybFjT6/BGJYcQ4Ak9Ilpm5MW/WcW85R98F5z1i+Hl4TzxH54fnBNc357/H999wXd3bbGafHwBoCVxdakkmp55p9rla+xHchwVreB2dn2OvoWMM5cD1HNRv2WtFMPxD8FD+Rp9d3aiSs6aMaYqEaoR4abP2/tEiJIFAzrnW1atVnlWoPULzSu1jDsfbahBpkb82sd5ROEj2hLULg8VVEiiph7jRL5Gd+H4SwyW4dyJwOwK3xAg5cHMZ7vB8nLsR0Hez6XYE//ZC/FvLcG8tnyH97d/KgdvPwXDE0h0da8m9GcG58RzpazmkruaQuLqI+JUFZG8vIDs8D2c4gnMngjdsSeeZ+7n5HNycPze6FcG/E8G9Z1u9N/cWXts8rwCRC0ZQlMD1fNBz6BkeO34+7u0cnGHRQtzh5+zz3iiAaysMOPxrEdwruTg38snciTA9vB7YbXaHxZ0GN2N3AJgpMiGtELzMG9lMoc7Qd/Z7e0w4saHv9X7Wef+nvXMNte266vg65+xzzj7Pe+7Nuff03pt3G1objG1ME7W9IpVQCTVSqpZUKkZMiSik2pp+CKgfzAcpVBFbNdoiFB/UQCBSIaCxRaGlEUKj1UaTe85+v9+P9V4/+c+51r773ua2+ZDmZF+yYbDW2WudNedce/zHGHOOMcfQDenqlT21pSNMVpP5e7P/eQ2Pi61BjLpo2fyFenN6twKI1yAYfgGv+wCj8gFBykAGJIe7cLhHUlw2zDVj9pTpzffFVcssxQ3C4gaRoXUiAUf/V7IMaRhMIBOVVgxDhUeb+IfbeEebuEdr+KVlw0xBeQ4gApWYV9pLx1ekZSjM0woUrqDisgG/gB6lNGN68/xlOBRAduHidRYcR1skxRU79jmA6DuBIyht4JUFEgsYLp6Bl09YgBw6hC/t4R+ewa+t06v+KPCNWVraLD3t/PE15NVjedSCA0RBbG0SJfkUQLIZn9cmGn+JsP8rDCoHeLWUoSVZ5wEiUBxtw+EpC5qjbZLChmEgaZ2M6SSZDSikPTIm1bmeV1mGSg7Ka1DKQ3ELSlvEpTxROWe0lJ5zuXRfsYx7tGo1wByDZ22a9tUHUUm0bLSeQJqRwBsW9ggLu/Z6phHNuASOdduOjuqrKAW3GY/uS7/XM/W8oLx+OUAO94y3XCBOjhyiQwmNMwT1c4waHzTlyuYBceX5sXD1a9jowgNEkaAJw0srI9IickxNvkwyfJhh5Rx+PZXyxSU4OgFHJ43Ej8s5OHwLXLzJHgsnoZg31y4zu8pLICqtXKJiDor6O2OyHJRWobgOZdEqSWWZuOIQV+fMOPWhoBCNbShsEpdXCGvWTAqrVx6XiCpLhFUdVwirK5cdo3KeqLBvSIBUW7N+q39qR6Q2pS10vWr7Y8ZeEqgtaOLSGmF5E7+Sx6+uEVbWDMiTw004WjX3xUUHX1TZJ27eRdB+xGQ3VMK7q9FryKvH8qhrACADU/4sVLmRTIOoypVqjkw/ybBy/rsBUjhBXF5CTBEfnic+vIm4cNL8PQNCYQcO9y1wdCxsg5hODJWZJjqXmXSZqSTQrBmQJBULkkQAEXOKStI8q1DYgKNdKJwkKl5HXLiOqHiSuHiCqLRDLO90OY8YWeBQfzXXENiiWnrUuUBS2jb3azy2jysWFCnzZyDIwGPGXs4ZcGZjEcAMQKpr+FVpPgFkjeRIgmDJ9F3j8CoOXv0MtD8Cg88DxauCQ6BZ9M+CA0TqYmQAIoetqvVaM0t5tP4VvN9nVLmZoGElp2XOXSjuoh87ruaIi6dJCgck5S0SmUoymaQBCqfg6ACOzkJhH4oblrkFDgMQSdVV4osOiZaOj7R8rIntBklph6SyTVzbJK6vkEiDZSSwGI0iIOVtO4XTcHTatqN2i3tQ3IGS2pQ2kgknU+6SFphpA2mfWo64sk5S3ki119y92f9Iu5W27DjLW8QpeM1zypqPbRFVdghqqwS1HFFV4F414zL3qJ26w7Tu4LbOQ+dRGP9zm
sl/0WFw9f4vPEC0dq4sF0EYm4TEBiDBANxvgv8ZhtVb8RsOSU2MLem9C5LQ6Q8e1WQGrVqGETBKYsydOQYVYFKmFrPpXPMMAefoPMHhJkEhT1DcJSjvE1UPiOpvIWyeIWrv4TVX8VoOQdtS3HKImw5JMwWNeZ6kdEoFmWnSMK90FHA1n0iPpRxx3TECQMxrxqL5lsYqyjSXQCYAHp0nKZwnKe4bQOkdWNBdDSA5Qk329ZyWQ9R0GDUdJu3boPtn4B7aBGxX56+Fv7LgALGhCcqDpGTP0iIGICai97/A/3OGldssQIzUFhMKAFsWIA2HSFRPtUl5awYgM58QIPR/lzGbJvGnSApniUoHTKs7TGu7TGvXM629E7f2Ptz6B/DqP4vfuJ9h6f0My+9lXHkX49oPMandilu/Aa9+lrB+ykr8kky3V0tWc2VzHb+2wbSxjltfNeOUttSYYgkFM/fSsvSOAXRydKMBiLRmXNkgrl7SNJdpkPqy1SBVh1CTeD2n5eA3Hfoth3H73baisetjizIuPA6uOoCFB4h2nYmU7yJK4nQeotpuSjL2JOPq7YQtB2RmVVahvA2VzRlAdM1I9NoqSXUTypv2PgFDjNFO/7eYJzw8QVg4RVw5CXrZgg4AAAtoSURBVO1NktEq3bbDoJ/HG78L/Acg/AxET0P0gu1DUAXvJZh+E8ZfgcET0H2cpPVJks4DJK0fgc710DpH3NwnrO/h17bwqmu41WW8umx+B6/x3TSt5Um6dzOu30W3dJpBZQWv48DQgZEDfS3drpKUdqFyGqpnoLZPXN4lKGziyjko8NeXobpDUjtB1FgjauZIGjk7fr23ngNdh2HNodtycMf3wvg/bKzVnMviqly2wBcWHCByKcndZGM7zTZNpThX2Luv3K1fYVK7IwWImECTZwEkTyIp23TMtVAmT32JpLYC1ZRpxDiipv7OQ/mAuHQ9fuU00/oWk7ZDv+vQ7p+mN76bIHwQ+CPgqxBXLfMoM2ZGZiuEijMewvQ5m5zZ+xuYPgbex8F9CNwHYfIxkvFHiEcfJhrdz7DxfobNCwxb72XY+rHLqf4zMHiUuPfbBIMP4vVvZ9o9ybTtGLNO4Pc1udcSdG0DGinVNohKm4SFHMjUayxBfZu4sUvcXCdurUBTpPELaBlAlui0tnHHD8BEmeJTjb3AAPh+XV98gJgANoFEOkSe3DSpq+KzkmcZ1d5NIKmqH7y6DtUtqK0bE0TAkLQUGUbINI00R00T9h2oHED5RpLqzfiNc0xaJ+h3lul2HarNk/T6v8Zw9DmI/xH4lg2YNHn6bbIVw0RZaQOFsmq/ihIuKwI5Ua5ZBVkeXkEXbTkwlQTzXgTvf9KjzkWqN/J/4L4I4+fBfxaSv4LwEdz+Bbr1M3SqDoOqw7TmmFU8CQOjDVtL0MpBbQ2qcyCob84BRPdYs8ocew5xZ4lBbYtu++1Mp58CL417e1ODfD+MHd/1zDeo1V3l3FNu1kRhpAohVV3j+BvG/Ai7S9CUFM1DbRsa6ySadIrqW0SSrGIgMYWOAkhVK0L7UD5PVLkBt3aGcWufQfeAweAWBoMfptv7RfzpU/iT75AombRSYipUNTH1XAh8peyPbck1PyJRhgl5+udJ4eAaQLZMHdnVOFVDMqtyuq65laK/rzjOIgdUU8/klnoK3MeYdO6jU7uJTjXHpOnga5wyFWe0DNIS+jsDQiNP3Ngmaq0TtZcu3avrPYewk6PfOE2v89P43mchTOv4Lf5K7vdk4IXWIBlAxDeKH1X5MlOlT3HRRv0/z6R+D1FvGdrr1sSo70Azbxggbmsyu2fImFIZQBrSNntQvc7Y5V4jz6jt0Ovs0u/dyWT0q0STJ0imXzf17xJhMiviog2NgUK8lSAwxo083GiKH3oEQWTvU6oi5eFWOq8AVB5bKf7NMbT15ZXuX3Xm9RyVZ1ZMplFEc0cpJBNBYMKYhKgaRP8O7ufwex9l1Lwdt3OCsOsQSYvOAJKed
x3Ivm+tkbS2iNt54s6y/V7XDUBWCDp5hs23Mug+SBT+A9A3+zNM+9+TxRb74jUEEC33XgmQbzNt/ARxfwU6eWhuQmPbAqTjkHQEkG2o71oTbAYQzTs2jV0uDeO39ph0rmc6uIA3fgS8v4egOB93Zwq8yKpTni5PlY5izYtk9qkYp6JOxzY1v8K2Vc5R6wjTtE55Visv3RCUKhS7IGeUi4LzsgC9YC7LoA0X97VXQgGwZhVPCRFeAO8JGD5E3LsLBmegl7NMn40xM7kEGoGklSNpbbwyQDrrBJ0NRq07mPQfI4m+ZnZwqp/XuALJsrsvJsqNBpltotEOs4HdU6BfTltEoheZNt9HPJREzENrywKktW5XZnoOSVvmRg5km18pYVs5osZJovZ7oKeUNV8A7zkIW2memjTwVB2RRSdGjxLCWKtq6o9KgGXFgurEtEzcWKLiLaaSZEzsTYi9MYk/MWSrsGpvvW/J7ItIH64G5klGZTRmjGtSH6kP1nyTSvoOeE9C52EY/DgMdu2YBRBjQq5BffWSpmivQnuDpLNB0l0xq1ZaudJ7STqb+J0dxq27mfY/C8l/ktAzuzmUSf1a/iy+BplJ3WzDjSoHmdJUEB/Rb96HPxRzbEB7DVoyr1ZIeg7RwCHpWk0ibWKo6xB3HWNSuK0bGFcvEDQ/AcOnIaxdmj+oqJFKHqkSkeFKa+jZdTUFUSqZRB8vruFTMRSmZecS7Z8XghPXFoFRIRjZW7Fr615okm9SqgagIhmKo8lIdpUSganZOGIYdEyZumy/t9EiMi81B4vK0Pw76PwOcfcCQec0QTN1LMr7LoAIBKIUIFq+pnNJgFjBsYHbyzPs3sN08NeQaGGhk9by07iv3c9CA0RAMOE+EmLSGjqa8lppYQma+NGniZO7jEc7C/kwXmzXIZSvYOLA2B4j18FzHSbuHsPBXXRbvwxodep/LcOKKWecqMYyu0bnGYlZdG73O1hO1n0Z6SGXwDQ7NzPwbD/qFUcB/pXIlAHQ8oTmX5bsttW0CfVV5L4Mk7/EG/wS7vBtuON1omkOvHW8wSphb90KjqY07J4xtYKuJubWl8JwnUnPYTB8D/CMWYyIgxrE3VQ4vAmQN+wbmAEkE+RGoqcOQwZMkj8k4l6C3qaVlNmk1F3DFUCCJZg6uAP5Ndbo925hNLmPIPhd4lCZxwvWrMmePzO89YVE9fFJUJk3mulYcNgkBjo3XbQYtQARSLxvEbt/QeB+FHf6TsajLYajJcbjNYLhBnRWQaZnW9p225il8UDaJQ+DTaaDJUbjnySOv27BqoDQWJrw+Mb/ejDlYmuQbJIoZsgY+DKAjHD5Y+B+kuGe9TDL6dVeIh6fYNhdI/KW8CcO49Eqo94dTFufhsHTELycMoCYQMtOWWNiCGkDgUNHXTiejwWIACESWNJIm6yrpm/qZ5r1JX4egi8RTn7DLFO3uuuM/FWCqQMCgzzm/TUQYCZ5GG9D6xR0zuIODphMfoFQBUfN+5apl6YsOZ7hvy6tLjRAZHWIKYz5MbNabLoY41tgyjT+PAk/TzLcTwGi5cwNgv4NdJtnGfZPMRnfSOz/FISPgvcv4CoaODVPDP8LfQKD3OFyjeuo744PHJY7sjiCOXCkczKZWppIJ5RT56Wck/JdfBvcv8V1H2Ls3s3QO4fnrs8BxCEZrRBNNmC8B63T0LqFYHAn/vS3CIKL1m+TCST9CNfw55oAiPmNMvEp71qWewmPSfBF4vhjBL2zYPwh20T1A/zunXRq9zDs30sw+TgEXwSesxNQ1S9XbXIpjxkOBAyFr6RfqtHjxkfaB5mZOrUaRWDRUvDYACSmSmKqwk7AV7HxLgT/DclTwOOMph/And5KMsgZEzToOfgDB3+cIx5uQ/skNG8j6X+I2PtT/KBpMi3JspL/xzT8JkDemG9ATJFVHJrZF2ZZNPVOyzoPv0wS/Tpe+3oSrWI194kb74DRzzHuChh/AsE/QfCSRYRQEcuZEZo0v
+a5BggyVQQO1dhOwaHvdX5cH7UtzWnUqLqiRGpKhdMzWUOSNHtIGA8J/AmhOzXLyoQaR9XsJ/emjxO5H4bRLdBfwu87TAcOk5FDMNIqV564+TYYPAzBk3hB2zgtzWt+EyDH9cu/unYlMTVFNdktLFpsPIYWgQzTyAR5BpJH8btvNev5tG+G3gWIPwHTL0DUtNaTvG3hBCLtb1fohkgJ6dKJuJ4vEijm6dV19Qdzl/ohJpWWM1pEDskyCSUSswwbGLdJpKGFIUE0IIx7JhvlDPj+C2ZbANMPwehGgvEy3sRhqlU+1yHpO8Stm2D0KYifwY2qxtgMtYqctvuDGdwb46kLbmLNAyQtMGEcaRlA9JL/Dfg9wt7boasVmnfA+D7gD4BnLf9rSiFGk3POVDxSphTFVknKyqxKJ+kCXSaxj1t7aGjqg6ZGM0kuR+nLJLyUAiSe1d2QqIgZEtEhSnzj+Y+MM1UhKl8F/zfBvYPE3cL3HfzQIQwdkoEins/B+DHga0ySkpmeyRUj18yxatDXAUP/Dzulf7mPSc8LAAAAAElFTkSuQmCC
+```
+
+### `annotation`
+
+NavLink 自定义资源的 Kubernetes 注释。
+
+### `label`
+
+NavLink 自定义资源的 Kubernetes 标签。
+
+### `sideLabel`
+
+出现在左侧导航栏中的标签。
+
+### `target`
+
+设置链接的锚标记 (`<a>`) 的目标属性。该属性(取决于浏览器)确定它是在新窗口中打开还是在当前选项卡中打开。
+
+默认值为 `_self`,即在当前选项卡上打开链接。要在新窗口或选项卡中打开链接,请将目标设置为 `_blank`。
+
+有关目标属性的详细信息,请参阅[此页面](https://www.w3schools.com/tags/att_a_target.asp)。
+
+### `toService`
+
+有五个字段用于创建如下所示的 URL:`https://<Rancher 服务器 URL>/k8s/clusters/<集群 ID>/k8s/namespace/<命名空间>/service/<协议>:<服务名称>:<端口>/proxy/<路径>`
+
+例如,Monitoring 服务的链接可以这样设置:
+
+- 名称:`rancher-monitoring-grafana`
+- 命名空间:`cattle-monitoring-system`
+- 路径:`proxy/?orgId=1`
+- 端口:`"80"`
+- Scheme:`http`
+
+需要提供 `toService` 或 `toURL` 指令中的一个。
+
+### `toURL`
+
+可以是任何链接,甚至可以是集群外部的链接。
+
+需要提供 `toService` 或 `toURL` 指令中的一个。
+
+## 链接示例
+
+### `toURL` 的链接示例
+
+以下 NavLink YAML 显示了将 NavLink 配置到 Grafana 仪表板的示例:
+
+```yaml
+apiVersion: ui.cattle.io/v1
+kind: NavLink
+metadata:
+ name: grafana
+spec:
+ group: "Monitoring Dashboards"
+ toURL: https://<Rancher 服务器 URL>/api/v1/namespaces/cattle-monitoring-system/services/http:rancher-monitoring-grafana:80/proxy/?orgId=1
+```
+
+添加上述 YAML 会创建 Grafana 的链接,如以下截图所示:
+
+
+
+### `toService` 的链接示例
+
+以下 YAML 显示了用于链接目标的 `toService` 示例:
+
+```yaml
+apiVersion: ui.cattle.io/v1
+kind: NavLink
+metadata:
+ annotations:
+ key: annotation
+ labels:
+ key: label
+ name: navlinkname
+spec:
+ description: This is a description field # 可选。
+ group: "group1" # 可选。如果未提供,则显示为独立链接。
+ iconSrc: data:image/jpeg;base64,[icon source string is clipped for brevity]
+ label: This is a label # 可选。
+ sideLabel: A side label. # 可选。
+ target: _blank # 可选。_blank 表示在新选项卡或窗口中打开链接。
+ toService: # 需要提供 toService 或 toURL 中的一个。
+ name: rancher-monitoring-grafana
+ namespace: cattle-monitoring-system
+ path: proxy/?orgId=1
+ port: "80"
+ scheme: http
+```
+
+添加上面的 `toService` 参数会创建 Grafana 的链接,如以下截图所示:
+
+
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
new file mode 100644
index 00000000000..818ccfc025a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md
@@ -0,0 +1,57 @@
+---
+title: 配置全局默认私有镜像仓库
+---
+
+:::note
+本页介绍了安装 Rancher 后如何从 Rancher UI 配置全局默认私有镜像仓库。
+
+有关如何在 Rancher 安装期间设置私有镜像仓库的说明,请参阅[离线安装指南](../../../pages-for-subheaders/air-gapped-helm-cli-install.md)。
+
+:::
+
+私有镜像仓库是集群中私有、一致且集中的容器镜像源。你可以使用私有容器镜像仓库,在组织内共享自定义基础镜像。
+
+在 Rancher 中设置私有镜像仓库主要有两种方式:
+
+* 通过全局视图中的 **Settings** 选项卡设置全局默认镜像仓库。
+* 在集群级别设置下的高级选项中设置私有镜像仓库。
+
+全局默认镜像仓库适用于离线环境,可用于不需要凭证的镜像仓库。而集群级私有镜像仓库用于需要凭证的私有镜像仓库。
+
+## 将不需要凭证的私有镜像仓库设置为默认镜像仓库
+
+1. 登录 Rancher 并配置默认管理员密码。
+1. 选择 **☰ > 全局设置**。
+1. 转到 `system-default-registry` 并选择 **⋮ > 编辑设置**。
+1. 输入你镜像仓库的主机名和端口(例如 `registry.yourdomain.com:port`)。不要在文本前加上 `http://` 或 `https://`。
+
+**结果**:Rancher 会从你的私有镜像仓库中拉取系统镜像。
+
+### 带 RKE2 下游集群的命名空间私有镜像仓库
+
+默认情况下,大多数私有镜像仓库应该能与 RKE2 下游集群一起工作。
+
+但是,如果你尝试设置 URL 格式为 `website/subdomain:portnumber` 的命名空间私有镜像仓库,则需要执行额外的步骤:
+
+1. 选择 **☰ > 集群管理**。
+1. 在列表中找到 RKE2 集群,然后点击 **⋮ > 编辑配置**。
+1. 从**集群配置**菜单中,选择**镜像仓库**。
+1. 在**镜像仓库**中,选择**配置高级 Containerd Mirror 和仓库认证选项**选项。
+1. 在 **Mirrors** 下的文本字段中,输入**镜像仓库主机名**和 **Mirror 端点**。
+1. 单击**保存**。
+1. 根据需要对每个下游 RKE2 集群重复操作。
+
+## 创建集群时配置使用凭证的私有镜像仓库
+
+无法为每个 Rancher 配置的集群全局设置具有授权认证的私有镜像仓库。因此,如果你希望 Rancher 配置的集群从使用凭证的私有镜像仓库中拉取镜像,则每次创建新集群时都必须通过高级集群选项传递镜像仓库凭证。
+
+由于创建集群后无法配置私有镜像仓库,因此你需要在初始集群设置期间执行这些步骤。
+
+1. 选择 **☰ > 集群管理**。
+1. 在**集群**页面上,单击**创建**。
+1. 选择集群类型。
+1. 在**集群配置**中,转到**镜像仓库**选项卡,然后选择**为 Rancher 从私有镜像仓库中拉取镜像**。
+1. 输入镜像仓库主机名和凭证。
+1. 单击**创建**。
+
+**结果**:新集群将从私有镜像仓库中拉取镜像。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
new file mode 100644
index 00000000000..09b1cec4990
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md
@@ -0,0 +1,236 @@
+---
+title: 集群和项目角色
+---
+
+集群和项目角色定义集群或项目内的用户授权。
+
+要管理这些角色:
+
+1. 单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**并转到**集群**或**项目或命名空间**选项卡。
+
+### 成员资格和角色分配
+
+非管理用户可以访问的项目和集群由 _成员资格_ 决定。成员资格是根据该集群或项目中分配的角色而有权访问特定集群或项目的用户列表。每个集群和项目都包含一个选项卡,具有适当权限的用户可以使用该选项卡来管理成员资格。
+
+创建集群或项目时,Rancher 会自动将创建者分配为`所有者`。分配了`所有者`角色的用户可以在集群或项目中给其他用户分配角色。
+
+:::note
+
+默认情况下,非管理员用户无法访问任何现有项目/集群。具有适当权限的用户(通常是所有者)必须显式分配项目和集群成员资格。
+
+:::
+
+### 集群角色
+
+_集群角色_ 是你可以分配给用户的角色,以授予他们对集群的访问权限。集群的两个主要角色分别是`所有者`和`成员`。
+
+- **集群所有者:**
+
+ 可以完全控制集群及其中的所有资源。
+
+- **集群成员:**
+
+ 可以查看大多数集群级别的资源并创建新项目。
+
+#### 自定义集群角色
+
+Rancher 支持将 _自定义集群角色_ 分配给普通用户,而不是典型的`所有者`或`成员`角色。这些角色可以是内置的自定义集群角色,也可以是 Rancher 管理员定义的角色。这些角色便于为集群内的普通用户定义更受限或特定的访问权限。有关内置自定义集群角色的列表,请参阅下表。
+
+#### 集群角色参考
+
+下表列出了可用的内置自定义集群角色,以及默认的集群级别角色`集群所有者`和`集群成员`是否包含该权限:
+
+| 内置集群角色 | 所有者 | 成员 |
+| ---------------------------------- | ------------- | --------------------------------- |
+| 创建项目 | ✓ | ✓ |
+| 管理集群备份 | ✓ | |
+| 管理集群应用商店 | ✓ | |
+| 管理集群成员 | ✓ | |
+| 管理节点[(见下表)](#管理节点权限) | ✓ | |
+| 管理存储 | ✓ | |
+| 查看所有项目 | ✓ | |
+| 查看集群应用商店 | ✓ | ✓ |
+| 查看集群成员 | ✓ | ✓ |
+| 查看节点 | ✓ | ✓ |
+
+#### 管理节点权限
+
+下表列出了 RKE 和 RKE2 中`管理节点`角色可用的权限:
+
+| 管理节点权限 | RKE | RKE2 |
+|-----------------------------|-------- |--------- |
+| SSH 访问 | ✓ | ✓ |
+| 删除节点 | ✓ | ✓ |
+| 集群的垂直扩缩容 | ✓ | * |
+\***在 RKE2 中,你必须拥有编辑集群的权限才能对集群进行垂直扩缩容。**
+
+
+如果需要了解各个集群角色如何访问 Kubernetes 资源,在 Rancher UI 中找到这些角色:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 单击**集群**选项卡。
+1. 单击角色的名称。表格会显示角色授权的所有操作和资源。
+
+:::note
+
+在查看 Rancher 创建的默认角色关联的资源时,如果在一行上有多个 Kubernetes API 资源,则该资源将带有 `(Custom)` 标识。这不代表这个资源是自定义资源,而只是表明多个 Kubernetes API 资源作为一个资源。
+
+:::
+
+### 为集群成员提供自定义集群角色
+
+在管理员[设置自定义集群角色后](custom-roles.md),集群所有者和管理员可以将这些角色分配给集群成员。
+
+要将自定义角色分配给新的集群成员,你可以使用 Rancher UI。要修改现有成员的权限,你需要使用 Rancher API 视图。
+
+要将角色分配给新的集群成员:
+
+
+
+
+1. 点击 **☰ > 集群管理**。
+1. 转到要将角色分配给成员的集群,然后单击 **Explore**。
+1. 单击**集群成员**。
+1. 单击**添加**。
+1. 在**集群权限**中,选择要分配给成员的自定义集群角色。
+1. 单击**创建**。
+
+
+
+
+1. 点击 **☰ > 集群管理**。
+1. 转到要将角色分配给成员的集群,然后单击 **Explore**。
+1. 点击**集群 > 集群成员**。
+1. 单击**添加**。
+1. 在**集群权限**中,选择要分配给成员的自定义集群角色。
+1. 单击**创建**。
+
+
+
+
+**结果**:成员具有所分配的角色。
+
+要将自定义角色分配给现有集群成员:
+
+1. 单击 **☰ > 用户 & 认证**。
+1. 找到要分配角色的成员。单击 **⋮ > 编辑配置**。
+1. 如果你添加了自定义角色,它们将显示在**自定义**中。选择要分配给成员的角色。
+1. 单击**保存**。
+
+**结果**:成员具有所分配的角色。
+
+### 项目角色
+
+_项目角色_ 是用于授予用户访问项目权限的角色。主要的项目角色分别是`所有者`、`成员`和`只读`。
+
+- **项目所有者:**
+
+ 可以完全控制项目及其中的所有资源。
+
+- **项目成员:**
+
+ 可以管理项目范围的资源,如命名空间和工作负载,但不能管理其他项目成员。
+
+:::note
+
+默认情况下,Rancher 的`项目成员`角色继承自 `Kubernetes-edit` 角色,而`项目所有者`角色继承自 `Kubernetes-admin` 角色。因此,`项目成员`和`项目所有者`角色都能管理命名空间,包括创建和删除命名空间。
+
+:::
+
+- **只读:**
+
+ 可以查看项目中的所有内容,但不能创建、更新或删除任何内容。
+
+:::danger
+
+如果用户分配到了项目的`所有者`或`成员`角色,用户会自动继承`命名空间创建`角色。然而,这个角色是 [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole),这表示角色的范围会延展到集群中的所有项目。因此,对于显式分配到了项目`所有者`或`成员`角色的用户来说,即使只有`只读`角色,这些用户也可以在分配给他们的其他项目中创建命名空间。
+
+:::
+
+#### 自定义项目角色
+
+Rancher 支持将 _自定义项目角色_ 分配给普通用户,而不是典型的`所有者`、`成员`或`只读`角色。这些角色可以是内置的自定义项目角色,也可以是 Rancher 管理员定义的角色。这些角色便于为项目内的普通用户定义更受限或特定的访问权限。有关内置自定义项目角色的列表,请参阅下表。
+
+#### 项目角色参考
+
+下表列出了 Rancher 中可用的内置自定义项目角色,以及这些角色是否由`所有者`,`成员`或`只读`角色授予的:
+
+| 内置项目角色 | 所有者 | 成员 | 只读 |
+| ---------------------------------- | ------------- | ----------------------------- | ------------- |
+| 管理项目成员 | ✓ | | |
+| 创建命名空间 | ✓ | ✓ | |
+| 管理配置映射 | ✓ | ✓ | |
+| 管理 Ingress | ✓ | ✓ | |
+| 管理项目应用商店 | ✓ | | |
+| 管理密文 | ✓ | ✓ | |
+| 管理 ServiceAccount | ✓ | ✓ | |
+| 管理服务 | ✓ | ✓ | |
+| 管理卷 | ✓ | ✓ | |
+| 管理工作负载 | ✓ | ✓ | |
+| 查看密文 | ✓ | ✓ | |
+| 查看配置映射 | ✓ | ✓ | ✓ |
+| 查看 Ingress | ✓ | ✓ | ✓ |
+| 查看项目成员 | ✓ | ✓ | ✓ |
+| 查看项目应用商店 | ✓ | ✓ | ✓ |
+| 查看 ServiceAccount | ✓ | ✓ | ✓ |
+| 查看服务 | ✓ | ✓ | ✓ |
+| 查看卷 | ✓ | ✓ | ✓ |
+| 查看工作负载 | ✓ | ✓ | ✓ |
+
+:::note 注意事项:
+
+- 上面列出的每个项目角色(包括`所有者`、`成员`和`只读`)均由多个规则组成,这些规则授予对各种资源的访问权限。你可以在**全局 > 安全 > 角色**页面上查看角色及其规则。
+- 在查看 Rancher 创建的默认角色关联的资源时,如果在一行上有多个 Kubernetes API 资源,则该资源将带有 `(Custom)` 标识。这不代表这个资源是自定义资源,而只是表明多个 Kubernetes API 资源作为一个资源。
+- `管理项目成员`角色允许项目所有者管理项目的所有成员,**并**授予这些成员任何项目范围的角色(不论他们是否有权访问项目资源)。单独分配此角色时要小心。
+
+:::
+
+### 定义自定义角色
+如前所述,你可以定义自定义角色,并将这些角色用在集群或项目中。上下文字段定义了角色是否显示在集群成员页面、项目成员页面或同时显示在这两个页面。
+
+定义自定义角色时,你可以授予对特定资源的访问权限,或指定自定义角色应继承的角色。自定义角色可以由特定授权和继承角色组成。所有授权都是累加的。换言之,如果你为特定资源定义更受限的授权,自定义角色继承的角色中定义的更广泛的授权**不会**被覆盖。
+
+### 默认集群和项目角色
+
+默认情况下,在普通用户创建新集群或项目时,他们会自动分配到所有者的角色,即[集群所有者](#集群角色)或[项目所有者](#项目角色)。但是,在某些组织中,这些角色可能会被认为有过多的管理访问权限。在这种情况下,你可以将默认角色更改为更具限制性的角色,例如一组单独的角色或一个自定义角色。
+
+更改默认集群/项目角色有以下两种方法:
+
+- **分配自定义角色**:为你的[集群](#自定义集群角色)或[项目](#自定义项目角色)创建一个[自定义角色](custom-roles.md),然后将自定义角色设置为默认。
+
+- **分配单独的角色**:将多个[集群](#集群角色参考)/[项目](#项目角色参考)角色配置为默认角色,并分配给创建的用户。
+
+ 例如,你可以选择混合使用多个角色(例如`管理节点`和`管理存储`),而不是使用继承的角色(例如`集群所有者`)。
+
+:::note
+
+- 虽然你可以[锁定](locked-roles.md)一个默认角色,但系统仍会将这个角色分配给创建集群/项目的用户。
+- 只有创建集群/项目的用户才能继承他们的角色。对于之后添加为集群/项目成员的用户,你必须显式分配角色。
+
+:::
+
+### 为集群和项目创建者配置默认角色
+
+你可以更改为创建集群或项目的用户自动创建的角色:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 单击**集群**或**项目或命名空间**选项卡。
+1. 找到你要用作默认角色的自定义或单个角色。然后通过选择 **⋮ > 编辑配置**来编辑角色。
+1. 在**集群创建者的默认角色**或**项目创建者的默认角色**中,将角色启用为默认。
+1. 单击**保存**。
+
+**结果**:默认角色已根据你的更改配置。分配给集群/项目创建者的角色会在**集群创建者的默认角色/项目创建者的默认角色**列中勾选。
+
+如果要删除默认角色,请编辑权限,并在默认角色选项中选择**否**。
+
+### 撤销集群成员资格
+
+如果你撤销一个普通用户的集群成员资格,而且该用户已显式分配集群的集群 _和_ 项目的成员资格,该普通用户将[失去集群角色](#集群角色)但[保留项目角色](#项目角色)。换句话说,即使你已经撤销了用户访问集群和其中的节点的权限,但该普通用户仍然可以:
+
+- 访问他们拥有成员资格的项目。
+- 行使分配给他们的任何[单个项目角色](#项目角色参考)。
+
+如果你想完全撤销用户在集群中的访问权限,请同时撤销他们的集群和项目成员资格。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
new file mode 100644
index 00000000000..0ab8560983b
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md
@@ -0,0 +1,120 @@
+---
+title: 自定义角色
+---
+
+在 Rancher 中,_角色_ 决定了用户可以在集群或项目中执行哪些操作。
+
+请注意,_角色_ 与 _权限_ 不同,权限决定的是你可以访问哪些集群和项目。
+
+:::danger
+
+自定义角色可以启用权限提升。有关详细信息,请参阅[本节](#权限提升)。
+
+:::
+
+
+## 先决条件
+
+要完成此页面上的任务,需要以下权限之一:
+
+- [管理员全局权限](global-permissions.md)。
+- 分配了[管理角色](global-permissions.md)的[自定义全局权限](global-permissions.md#自定义全局权限)。
+
+## 创建自定义角色
+
+虽然 Rancher 提供一组开箱即用的默认用户角色,但你还可以创建默认的自定义角色,从而在 Rancher 中为用户提供更精细的权限。
+
+添加自定义角色的步骤因 Rancher 的版本而异。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 选择一个选项卡来确定要添加的角色的范围。这些选项卡是:
+
+- **全局**:仅在允许成员管理全局范围的资源时,可以分配该角色。
+- **集群**:仅在向集群添加/管理成员时,可以分配该角色。
+- **项目或命名空间**:仅在向项目或命名空间添加/管理成员时,可以分配该角色。
+
+1. 根据所需要的范围,单击**创建全局角色**、**创建集群角色**或**创建项目或命名空间的角色**。
+1. 输入角色的**名称**。
+1. 可选:选择**集群创建者的默认角色/项目创建者的默认角色**选项,以将该角色分配给集群/项目创建者。使用此功能,你可以扩展或限制集群/项目创建者的默认角色。
+
+ > 开箱即用的**集群创建者的默认角色**和**项目创建者的默认角色**分别是`集群所有者`和`项目所有者`。
+
+1. 使用**授权资源**选项将各个 [Kubernetes API 端点](https://kubernetes.io/docs/reference/)分配给角色。
+
+ > 在查看 Rancher 创建的默认角色关联的资源时,如果在一行上有多个 Kubernetes API 资源,则该资源将带有 `(Custom)` 标识。这不代表这个资源是自定义资源,而只是表明多个 Kubernetes API 资源作为一个资源。
+
+ > **资源**文本字段可以用来搜索预定义的 Kubernetes API 资源,或者为授权输入自定义资源名称。在此字段中输入资源名称后,必须从下拉列表中选择预定义或`(自定义)`资源。
+
+ 你还可以选择每个分配的端点可用的 cURL 方法(`Create`、`Delete`、`Get` 等)。
+
+1. 使用 **Inherit from** 选项将各个 Rancher 角色分配给你的自定义角色。请注意,如果自定义角色从父角色继承,你需要先删除子角色才能删除父角色。
+
+1. 单击**创建**。
+
+## 创建从另一个角色继承的自定义角色
+
+如果你有一组需要在 Rancher 中具有相同访问权限的用户,一种节省时间的方法是创建一个新的自定义角色,而该角色的规则都是从另一个角色(例如管理员角色)复制而来的。这样,你只需要配置现有角色和新角色之间不同的部分。
+
+然后,你可以将自定义角色分配给用户或组。该角色在用户首次登录 Rancher 时生效。
+
+要基于现有角色创建自定义角色:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 单击**集群**或**项目或命名空间**选项卡。根据所需要的范围,单击**创建集群角色**或**创建项目或命名空间的角色**。请注意,只有集群角色和项目/命名空间角色可以从另一个角色继承。
+1. 输入角色的名称。
+1. 在 **Inherit From** 选项卡中,选择自定义角色需要从哪个角色继承权限。
+1. 在**授权资源**选项卡中,选择拥有自定义角色的用户要启用的 Kubernetes 资源操作。
+
+ > **资源**文本字段可以用来搜索预定义的 Kubernetes API 资源,或者为授权输入自定义资源名称。在此字段中输入资源名称后,必须从下拉列表中选择预定义或`(自定义)`资源。
+1. 可选:将角色设置为默认。
+1. 单击**创建**。
+
+## 删除自定义角色
+
+删除自定义角色时,具有此自定义角色的所有全局角色绑定(Global Role Bindings)都将被删除。
+
+如果某个用户仅分配了一个自定义全局角色,而且你删除了这个角色,该用户将不能再访问 Rancher。要让用户重新获得访问权限,管理员需要编辑用户并应用新的全局权限。
+
+自定义角色可以删除,但内置角色不能删除。
+
+要删除自定义角色:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 转到要删除的自定义全局角色,然后单击 **⋮ (…) > 删除**。
+1. 单击**删除**。
+
+## 为组分配自定义角色
+
+如果你有一组需要在 Rancher 中具有相同访问权限的用户,一种节省时间的方法是创建一个新的自定义角色。将角色分配给组时,组中的用户在首次登录 Rancher 时就会拥有配置的访问级别。
+
+组中的用户登录时,他们默认获得内置的**普通用户**全局角色。他们还将获得分配给他们的组的权限。
+
+如果将用户从外部身份验证系统的组中删除,用户将失去分配给该组的自定义全局角色的权限。但是,用户仍会拥有**普通用户**角色。
+
+:::note 先决条件:
+
+只有在以下情况下,你才能将全局角色分配给组:
+
+* 你已设置[外部身份验证提供程序](../../../../pages-for-subheaders/authentication-config.md#外部验证与本地验证)。
+* 外部身份验证提供程序支持[用户组](../../authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups.md)。
+* 你已使用身份验证提供程序设置了至少一个用户组。
+
+:::
+
+要将自定义角色分配给组,请执行以下步骤:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**组**。
+1. 转到将分配自定义角色的组,然后单击 **⋮ > 编辑配置**。
+1. 如果你已创建角色,角色将显示在**自定义**中。选择要分配给组的自定义角色。
+1. 可选:在**全局权限**或**内置角色**中,选择要分配给该组的其他权限。
+1. 单击**保存**。
+
+**结果**:自定义角色将在组内用户登录 Rancher 时生效。
+
+## 权限提升
+
+`配置应用商店`这个自定义权限很强大,应谨慎使用。如果管理员将`配置应用商店`权限分配给普通用户,可能会导致权限提升。在这种情况下,用户可以让自己对 Rancher 配置的集群进行管理员访问。因此,拥有此权限的任何用户都应被视为管理员。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
new file mode 100644
index 00000000000..7f7606413f1
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md
@@ -0,0 +1,365 @@
+---
+title: 全局权限
+---
+
+
+
+
+
+_权限_ 是你在为用户选择自定义权限时可以分配的个人访问权限。
+
+全局权限定义用户在任何特定集群之外的授权。Rancher 提供四种开箱即用的默认全局权限:`Administrator` (管理员)、`Restricted Admin` (受限管理员)、`Standard User` (标准用户) 和 `User-Base` 用户。
+
+- **管理员**:可以完全控制整个 Rancher 系统和其中的所有集群。
+
+- **受限管理员**:可以完全控制下游集群,但不能更改本地 Kubernetes 集群。
+
+- **普通用户**:可以创建新集群并使用它们。普通用户还可以在自己的集群中向其他用户分配集群权限。
+
+- **User-Base 用户**:只有登录权限。
+
+你无法更新或删除内置的全局权限。
+
+## 分配全局权限
+
+本地用户的全局权限分配与使用外部认证登录 Rancher 的用户不同。
+
+### 新本地用户的全局权限
+
+在创建新本地用户时,请在填写**添加用户**表单时为他分配全局权限。
+
+如果需要查看新用户的默认权限:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. **角色**页面有按范围分组的角色选项卡。每个表都列出了范围内的角色。在**全局**选项卡的**新用户的默认角色**列中,默认授予新用户的权限用复选标记表示。
+
+你可以[更改默认全局权限来满足你的需要](#配置默认的全局权限)。
+
+### 使用外部认证登录的用户的全局权限
+
+当用户首次使用外部认证登录 Rancher 时,他们会自动分配到**新用户的默认角色**的全局权限。默认情况下,Rancher 为新用户分配 **Standard User** 权限。
+
+如果需要查看新用户的默认权限:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. **角色**页面有按范围分组的角色选项卡。每个表都列出了范围内的角色。在每个页面的**新用户的默认角色**列中,默认授予新用户的权限用复选标记表示。
+
+你可以[更改默认权限来满足你的需要](#配置默认的全局权限)。
+
+你可以按照[步骤](#为单个用户配置全局权限)操作来将权限分配给单个用户。
+
+如果外部认证服务支持组,你可以[同时为组中的每个成员分配角色](#为组配置全局权限)。
+
+## 自定义全局权限
+
+使用自定义权限可以为用户提供 Rancher 中更为受限或特定的访问权限。
+
+当来自[外部认证](../authentication-config/authentication-config.md)的用户首次登录 Rancher 时,他们会自动分配到一组全局权限(以下简称权限)。默认情况下,用户第一次登录后会被创建为用户,并分配到默认的`用户`权限。标准的`用户`权限允许用户登录和创建集群。
+
+但是,在某些组织中,这些权限可能会被认为权限过大。你可以为用户分配一组更具限制性的自定义全局权限,而不是为用户分配 `Administrator` 或 `Standard User` 的默认全局权限。
+
+默认角色(管理员和标准用户)都内置了多个全局权限。系统管理员角色包括所有全局权限,而默认用户角色包括三个全局权限,分别是创建集群、使用应用商店模板和 User Base(登录 Rancher 的最低权限)。换句话说,自定义全局权限是模块化的,因此,如果你要更改默认用户角色权限,你可以选择需要包括在新的默认用户角色中的全局权限子集。
+
+管理员可以通过多种方式强制执行自定义全局权限:
+
+- [创建自定义全局角色](#自定义全局角色)。
+- [更改新用户的默认权限](#配置默认的全局权限)。
+- [为单个用户配置全局权限](#为单个用户配置全局权限)。
+- [为组配置全局权限](#为组配置全局权限)。
+
+### 结合内置的全局角色
+
+Rancher 提供了多个全局角色,它们为某些常见的用户场景授予了精细的权限。下表列出了每个内置的全局角色(`Administrator`、`Standard User` 和 `User-Base`)以及它包含的默认全局权限。
+
+| 自定义全局权限 | Administrator | Standard User | User-Base |
+| --------------------------- | ------------- | ------------- | --------- |
+| 创建集群 | ✓ | ✓ | |
+| 创建 RKE 模板 | ✓ | ✓ | |
+| 管理认证 | ✓ | | |
+| 管理应用商店 | ✓ | | |
+| 管理集群驱动 | ✓ | | |
+| 管理主机驱动 | ✓ | | |
+| 管理 PodSecurityPolicy 模板 | ✓ | | |
+| 管理角色 | ✓ | | |
+| 管理设置 | ✓ | | |
+| 管理用户 | ✓ | | |
+| 使用应用商店模板 | ✓ | ✓ | |
+| User-Base (基本登录访问) | ✓ | ✓ | |
+
+如果需要查看每个全局权限对应哪些 Kubernetes 资源:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 如果单击单个角色的名称,表格会显示该角色授权的所有操作和资源。
+
+:::note 注意事项:
+
+- 上面列出的每个权限都包含多个未在 Rancher UI 中列出的权限。如果需要获取完整权限列表以及组成权限的规则,请通过 `/v3/globalRoles` API 进行访问。
+- 在查看 Rancher 创建的默认角色关联的资源时,如果在一行上有多个 Kubernetes API 资源,则该资源将带有 `(Custom)` 标识。这不代表这个资源是自定义资源,而只是表明多个 Kubernetes API 资源作为一个资源。
+
+:::
+
+### 自定义全局角色
+
+当内置的全局角色无法直接满足你的需求时,可以创建自定义全局角色。
+
+通过 UI 或自动化(例如 Rancher Kubernetes API) 创建自定义全局角色,你可以指定使用与上游角色和集群角色相同的规则。
+
+#### Escalate 与 Bind
+
+当授予全局角色权限时,请注意 Rancher 遵循 `escalate` 与 `bind`,其方式类似于 [Kubernetes](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#restrictions-on-role-creation-or-update)。
+
+在全局角色提供的这两项配置,都可以授予用户绕过 Rancher 提升校验的权限。这可能允许用户成为管理员。由于这有着极高的安全风险,因此应非常谨慎地将 `bind` 和 `escalate` 权限分发给用户。
+
+`escalate` 允许用户更改全局角色并添加任何授权,即使用户没有当前或新版本的全局角色权限。
+
+`bind` 允许用户对指定的全局角色进行授权(创建 ClusterRoleBinding),即使该用户没有访问全局角色的权限。
+
+:::danger
+
+通配符动词 `*` 也包括 `bind` 和 `escalate`。这意味着为用户的全局角色设置 `*` 也会提供 `escalate` 和 `bind`。
+
+:::
+
+##### 自定义全局角色示例
+
+仅为名称为 `test-gr` 的全局角色授予 escalate 权限:
+
+```yaml
+rules:
+ - apiGroups:
+ - "management.cattle.io"
+ resources:
+ - "globalroles"
+ resourceNames:
+ - "test-gr"
+ verbs:
+ - "escalate"
+```
+
+为所有的全局角色授予 escalate 权限:
+
+```yaml
+rules:
+ - apiGroups:
+ - "management.cattle.io"
+ resources:
+ - "globalroles"
+ verbs:
+ - "escalate"
+```
+
+仅为名称为 `test-gr` 的全局角色授予创建角色绑定(绕过 escalation 检查)的权限
+
+```yaml
+rules:
+ - apiGroups:
+ - "management.cattle.io"
+ resources:
+ - "globalroles"
+ resourceNames:
+ - "test-gr"
+ verbs:
+ - "bind"
+ - apiGroups:
+ - "management.cattle.io"
+ resources:
+ - "globalrolebindings"
+ verbs:
+ - "create"
+```
+
+授予 `*` 权限(同时包含 `escalate` 与 `bind`)
+
+```yaml
+rules:
+ - apiGroups:
+ - "management.cattle.io"
+ resources:
+ - "globalroles"
+ verbs:
+ - "*"
+```
+
+#### 下游集群的全局角色权限
+
+全局角色可以通过 `inheritedClusterRoles` 字段在每个下游集群上授予一个或多个 RoleTemplate。此字段中引用的值必须是集群级别的 RoleTemplate 并且在集群 `context` 中存在。
+
+通过此字段,用户将获取当前或未来的所有下游集群的指定权限,例如以下的全局角色示例:
+
+```yaml
+apiVersion: management.cattle.io/v3
+kind: GlobalRole
+displayName: All Downstream Owner
+metadata:
+ name: all-downstream-owner
+inheritedClusterRoles:
+ - cluster-owner
+```
+
+任何具有此权限的用户都将是所有下游集群的所有者(cluster-owner)。如果添加了新的集群,无论是什么类型的集群,用户也将是这个集群的所有者。
+
+:::danger
+
+在[默认的全局角色](#配置默认的全局权限)中使用此字段可能会导致用户获得过多的权限。
+
+:::
+
+### 配置默认的全局权限
+
+如果你想限制新用户的默认权限,你可以删除作为默认角色的`用户`权限,然后分配多个单独的权限作为默认权限。你也可以在一组其他标准权限之上添加管理权限。
+
+:::note
+
+默认角色仅分配给从外部认证登录的用户。对于本地用户,在将用户添加到 Rancher 时,必须显式分配全局权限。你可以在添加用户时自定义这些全局权限。
+
+:::
+
+要更改在外部用户首次登录时分配给他们的默认全局权限,请执行以下步骤:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。在**角色**页面上,确保选择了**全局**选项卡。
+1. 查找要添加或删除的默认权限集。然后,通过选择 **⋮ > 编辑配置**来编辑权限。
+1. 如果要将权限添加为默认权限,请选择**是:新用户的默认角色**,然后单击**保存**。如果要删除默认权限,请编辑该权限并选择**否**。
+
+**结果**:默认全局权限已根据你的更改配置。分配给新用户的权限会在**新用户的默认角色**列中显示为复选标记。
+
+### 为单个用户配置全局权限
+
+要为单个用户配置权限:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**用户**。
+1. 找到要更改访问级别的用户,然后单击 **⋮ > 编辑配置**。
+1. 在**全局权限**和**内置角色**部分中,选中你希望用户拥有的权限的复选框。如果你在**角色**页面创建了角色,这些角色将出现在**自定义**部分,你也可以选择这些角色。
+1. 单击**保存**。
+
+**结果**:用户的全局权限已更新。
+
+### 为组配置全局权限
+
+如果你有一组需要在 Rancher 中有相同访问权限的用户,你可以一次性将权限分配给整个组来节省时间。这样,组中的用户在第一次登录 Rancher 时能拥有相应级别的访问权限。
+
+将自定义全局角色分配给组后,该角色将在组中用户登录 Rancher 时分配给用户。
+
+对于现有用户,新权限将在用户退出 Rancher 并重新登录时,或当管理员[刷新用户组成员名单](#刷新用户组成员名单)时生效。
+
+对于新用户,新权限在用户首次登录 Rancher 时生效。除了**新用户的默认角色**全局权限外,来自该组的新用户还将获得自定义全局角色的权限。默认情况下,**新用户的默认角色**权限等同于 **Standard User** 全局角色,但默认权限可以[配置。](#配置默认的全局权限)
+
+如果从外部认证服务中将用户从组中删除,该用户将失去分配给该组的自定义全局角色的权限。他们将继续拥有分配给他们的其他剩余角色,这通常包括标记为**新用户的默认角色**的角色。Rancher 将在用户登出或管理员[刷新用户组成员名单](#刷新用户组成员名单)时删除与组关联的权限。
+
+:::note 先决条件:
+
+只有在以下情况下,你才能将全局角色分配给组:
+
+- 你已设置[外部认证](../authentication-config/authentication-config.md#external-vs-local-authentication)
+- 外部认证服务支持[用户组](../authentication-config/manage-users-and-groups.md)
+- 你已使用外部认证服务设置了至少一个用户组。
+
+:::
+
+要将自定义全局角色分配给组,请执行以下步骤:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**组**。
+1. 转到你要分配自定义全局角色的组,然后单击 **⋮ > 编辑配置**。
+1. 在**全局权限**,**自定义**和/或**内置角色**部分中,选择该组应具有的权限。
+1. 单击**创建**。
+
+**结果**:自定义全局角色会在组内用户登录 Rancher 时生效。
+
+### 刷新用户组成员名单
+
+当管理员更新组的全局权限时,更改将在组成员退出 Rancher 并重新登录后生效。
+
+如果要让更改立即生效,管理员或集群所有者可以刷新用户组成员名单。
+
+如果用户已经从外部认证服务中的组中删除,管理员也需要刷新用户组成员名单。在这种情况下,刷新操作会让 Rancher 知道用户已从组中删除。
+
+要刷新用户组成员名单:
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**用户**。
+1. 单击**刷新用户组成员名单**。
+
+**结果**:对组成员权限的所有更改都会生效。
+
+## 受限管理员
+
+:::warning 已弃用
+
+受限管理员角色已弃用,并将在未来的 Rancher 版本(2.10 或更高版本)中删除。你应该创建具有所需权限的自定义角色,而不是依赖此项内置角色。
+
+:::
+
+Rancher 2.5 创建了一个新的 `restricted-admin` 角色,以防止本地 Rancher Server Kubernetes 集群的权限提升。此角色对 Rancher 管理的所有下游集群具有完全管理员权限,但没有更改本地 Kubernetes 集群的权限。
+
+`restricted-admin` 可以创建其他具有同样访问权限的 `restricted-admin` 用户。
+
+Rancher 还增加了一个新设置,来将初始启动的管理员设置为 `restricted-admin` 角色。该设置适用于 Rancher Server 首次启动时创建的第一个用户。如果设置了这个环境变量,则不会创建全局管理员,也就无法通过 Rancher 创建全局管理员。
+
+要以 `restricted-admin` 作为初始用户来启动 Rancher,你需要使用以下环境变量来启动 Rancher Server:
+
+```
+CATTLE_RESTRICTED_DEFAULT_ADMIN=true
+```
+
+### `受限管理员` 的权限列表
+
+下表列出了 `受限管理员` 与 `全局管理员` 和 `普通用户` 角色相比应具有的权限和操作:
+
+| 类别 | 操作 | 全局管理员 | 普通用户 | 受限管理员 | 受限管理员的注意事项 |
+| -------- | ------ | ------------ | ------------- | ---------------- | ------------------------------- |
+| 本地集群功能 | 管理本地集群(列出、编辑、导入主机) | 是 | 否 | 否 | |
+| | 创建项目/命名空间 | 是 | 否 | 否 | |
+| | 添加集群/项目成员 | 是 | 否 | 否 | |
+| | 全局 DNS | 是 | 否 | 否 | |
+| | 访问 CRD 和 CR 的管理集群 | 是 | 否 | 是 | |
+| | 另存为 RKE 模板 | 是 | 否 | 否 | |
+| 安全 | | | | | |
+| 启用认证 | 配置认证 | 是 | 否 | 是 | |
+| 角色 | 创建/分配 GlobalRoles | 是 | 否(可列出) | 是 | 认证 Webhook 允许为已经存在的权限创建 globalrole |
+| | 创建/分配 ClusterRoles | 是 | 否(可列出) | 是 | 不在本地集群中 |
+| | 创建/分配 ProjectRoles | 是 | 否(可列出) | 是 | 不在本地集群中 |
+| 用户 | 添加用户/编辑/删除/停用用户 | 是 | 否 | 是 | |
+| 组 | 将全局角色分配给组 | 是 | 否 | 是 | 在 Webhook 允许的范围内 |
+| | 刷新组 | 是 | 否 | 是 | |
+| PSP | 管理 PSP 模板 | 是 | 否(可列出) | 是 | 与 PSP 的全局管理员权限相同 |
+| 工具 | | | | | |
+| | 管理 RKE 模板 | 是 | 否 | 是 | |
+| | 管理全局应用商店 | 是 | 否 | 是 | 无法编辑/删除内置系统应用商店。可以管理 Helm 库 |
+| | 集群驱动 | 是 | 否 | 是 | |
+| | 主机驱动 | 是 | 否 | 是 | |
+| | GlobalDNS 提供商 | 是 | 是(自己) | 是 | |
+| | GlobalDNS 条目 | 是 | 是(自己) | 是 | |
+| 设置 | | | | | |
+| | 管理设置 | 是 | 否(可列出) | 否(可列出) | |
+| 用户 | | | | | |
+| | 管理 API 密钥 | 是(管理所有) | 是(管理自己的) | 是(管理自己的) | |
+| | 管理节点模板 | 是 | 是(管理自己的) | 是(管理自己的) | 只能管理自己的节点模板,不能管理其他用户创建的节点模板。 |
+| | 管理云凭证 | 是 | 是(管理自己的) | 是(管理自己的) | 只能管理自己的云凭证,不能管理其他用户创建的云凭证。 |
+| 下游集群 | 创建集群 | 是 | 是 | 是 | |
+| | 编辑集群 | 是 | 是 | 是 | |
+| | 轮换证书 | 是 | | 是 | |
+| | 立即创建快照 | 是 | | 是 | |
+| | 恢复快照 | 是 | | 是 | |
+| | 另存为 RKE 模板 | 是 | 否 | 是 | |
+| | 运行 CIS 扫描 | 是 | 是 | 是 | |
+| | 添加成员 | 是 | 是 | 是 | |
+| | 创建项目 | 是 | 是 | 是 | |
+| 自 2.5 起的功能 Chart | | | | | |
+| | 安装 Fleet | 是 | | 是 | 无法在本地集群中运行 Fleet |
+| | 部署 EKS 集群 | 是 | 是 | 是 | |
+| | 部署 GKE 集群 | 是 | 是 | 是 | |
+| | 部署 AKS 集群 | 是 | 是 | 是 | |
+
+
+### 将全局管理员更改为受限管理员
+
+在之前的版本中,文档建议如果管理员角色正在使用中,应将所有用户都更改为受限管理员。现在鼓励用户使用集群权限的功能构建自定义的全局角色,并将受限管理员迁移到新的自定义角色使用。
+
+你可以前往**安全 > 用户**,并将所有管理员角色转为受限管理员。
+
+已登录的用户可以根据需要将自己更改为 `restricted-admin`,但这应该是他们的最后一步操作,否则他们将没有进行该操作的权限。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md
new file mode 100644
index 00000000000..cb0ad71fa38
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md
@@ -0,0 +1,38 @@
+---
+title: 锁定角色
+---
+
+你可以将角色设置为`锁定`状态。锁定角色可防止把这些角色分配给用户。
+
+处于锁定状态的角色具有如下特性:
+
+- 无法再分配给当下还没有被分配到该角色的用户。
+- 将用户添加到集群或项目时,不会在**成员角色**下拉列表中列出。
+- 不会影响在锁定该角色之前,已经分配了该角色的用户。即使后来锁定了该角色,这些用户仍然保留该角色提供的访问权限。
+
+ **示例**:假设你的组织制定了一个内部策略,禁止把创建项目的权限分配给集群用户。这时候你需要执行这个策略。
+
+ 因此,在将新用户添加到集群之前,你需要锁定以下角色:`集群所有者`,`集群成员`和`创建项目`。然后,创建一个新的自定义角色,该角色的权限与`集群成员`相同,但没有创建项目的权限。然后,在将用户添加到集群时使用这个新的自定义角色。
+
+以下用户可以锁定角色:
+
+- 任何分配了`管理员`全局权限的用户。
+- 任何分配了带有`管理角色`权限的`自定义用户`。
+
+
+## 锁定/解锁角色
+
+如果要防止将角色分配给用户,可以将其设置为`锁定`状态。
+
+你可以在两种情况下锁定角色:
+
+- [添加自定义角色](custom-roles.md)时。
+- 编辑现有角色时(见下文)。
+
+集群角色和项目/命名空间角色可以锁定,而全局角色不能锁定。
+
+1. 在左上角,单击 **☰ > 用户 & 认证**。
+1. 在左侧导航栏中,单击**角色**。
+1. 转到**集群**选项卡或**项目或命名空间**选项卡。
+1. 找到要锁定(或解锁)的角色,选择 **⋮ > 编辑配置**。
+1. 从**锁定**选项中,选择**是** 或**否**。然后点击**保存**。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md
new file mode 100644
index 00000000000..0e8a5e89824
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/manage-role-based-access-control-rbac.md
@@ -0,0 +1,29 @@
+---
+title: 管理 RBAC
+---
+
+
+
+
+
+Rancher 通过 _用户_ 进行授权管理。如[认证](../authentication-config/authentication-config.md)中所述,用户可以是本地用户,也可以是外部用户。
+
+配置外部认证后,**用户**页面上显示的用户会发生变化。
+
+- 如果你以本地用户身份登录,则仅显示本地用户。
+
+- 如果你以外部用户身份登录,则会同时显示外部用户和本地用户。
+
+## 用户和角色
+
+一旦用户登录到 Rancher,他们的 _授权_,也就是他们在系统中的访问权限,将由 _全局权限_ 和 _集群和项目角色_ 决定。
+
+- [全局权限](global-permissions.md):
+
+ 定义用户在任何特定集群之外的授权。
+
+- [集群和项目角色](cluster-and-project-roles.md):
+
+ 定义用户在分配了角色的特定集群或项目中的授权。
+
+全局权限以及集群和项目角色都是基于 [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) 实现的。因此,权限和角色的底层实现是由 Kubernetes 完成的。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
new file mode 100644
index 00000000000..1f56b62762f
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/pod-security-standards.md
@@ -0,0 +1,126 @@
+---
+title: Pod 安全标准 (PSS) 和 Pod 安全准入 (PSA)
+---
+
+[Pod 安全标准 (PSS)](https://kubernetes.io/docs/concepts/security/pod-security-standards/) 和 [Pod 安全准入 (PSA)](https://kubernetes.io/docs/concepts/security/pod-security-admission/) 为大量工作负载定义了安全限制。
+它们在 Kubernetes v1.23 中可用并默认打开,并在 Kubernetes v1.25 及更高版本中替换了 [Pod Security Policies (PSP)](https://kubernetes.io/docs/concepts/security/pod-security-policy/)。
+
+PSS 定义了工作负载的安全级别。PSA 描述了 Pod 安全上下文和相关字段的要求。PSA 参考 PSS 级别来定义安全限制。
+
+## 升级到 Pod 安全标准 (PSS)
+
+确保将所有 PSP 都迁移到了另一个工作负载安全机制,包括将你当前的 PSP 映射到 Pod 安全标准,以便使用 [PSA 控制器](https://kubernetes.io/docs/concepts/security/pod-security-admission/)执行。如果 PSA 控制器不能满足企业的所有需求,建议你使用策略引擎,例如 [OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper)、[Kubewarden](https://www.kubewarden.io/)、[Kyverno](https://kyverno.io/) 或 [NeuVector](https://neuvector.com/)。有关如何迁移 PSP 的更多信息,请参阅你选择的策略引擎的文档。
+
+:::caution
+必须在删除 PodSecurityPolicy 对象_之前_添加新的策略执行机制。否则,你可能会为集群内的特权升级攻击创造机会。
+:::
+
+### 从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies {#remove-psp-rancher-workloads}
+
+Rancher v2.7.2 提供了 Rancher 维护的 Helm Chart 的新主要版本。v102.x.y 允许你删除与以前的 Chart 版本一起安装的 PSP。这个新版本使用标准化的 `global.cattle.psp.enabled` 开关(默认关闭)替换了非标准的 PSP 开关。
+
+你必须在_仍使用 Kubernetes v1.24_ 时执行以下步骤:
+1. 根据需要配置 PSA 控制器。你可以使用 Rancher 的内置 [PSA 配置模板](#psa-config-templates),或创建自定义模板并将其应用于正在迁移的集群。
+
+1. 将活动的 PSP 映射到 Pod 安全标准:
+ 1. 查看集群中哪些 PSP 仍处于活动状态:
+ :::caution
+ 此策略可能会错过当前未运行的工作负载,例如 CronJobs、当前缩放为零的工作负载或尚未推出的工作负载。
+ :::
+
+ ```shell
+ kubectl get pods \
+ --all-namespaces \
+ --output jsonpath='{.items[*].metadata.annotations.kubernetes\.io\/psp}' \
+ | tr " " "\n" | sort -u
+ ```
+
+ 1. 按照[将 PSP 映射到 Pod 安全标准](https://kubernetes.io/docs/reference/access-authn-authz/psp-to-pod-security-standards/)的 Kubernetes 指南将 PSS 应用于依赖 PSP 的工作负载。有关详细信息,请参阅[从 PodSecurityPolicy 迁移到内置 PodSecurity Admission 控制器](https://kubernetes.io/docs/tasks/configure-pod-container/migrate-from-psp/)。
+
+1. 要从 Rancher Chart 中删除 PSP,请在升级到 Kubernetes v1.25 _之前_将 Chart 升级到最新的 v102.x.y 版本。确保 **Enable PodSecurityPolicies** 选项**已禁用**。这将删除与以前的 Chart 版本一起安装的所有 PSP。
+
+:::info 重要提示
+如果你想将 Chart 升级到 v102.x.y,但不打算将集群升级到 Kubernetes v1.25 和弃用 PSP,请确保为每个要升级的 Chart 选择 **Enable PodSecurityPolicies** 选项。
+:::
+
+### 在 Kubernetes v1.25 升级后清理版本
+
+如果你在删除 Chart 的 PSP 时遇到问题,或者 Chart 不包含用于删除 PSP 的内置机制,Chart 升级或删除可能会失败并显示如下错误消息:
+```console
+Error: UPGRADE FAILED: resource mapping not found for name: "" namespace: "" from "": no matches for kind "PodSecurityPolicy" in version "policy/v1beta1"
+ensure CRDs are installed first
+```
+
+Helm 尝试在集群中查询存储在先前版本的数据 blob 中的对象时,就会发生这种情况。要清理这些版本并避免此错误,请使用 `helm-mapkubeapis` Helm 插件。要详细了解 `helm-mapkubeapis`、它的工作原理以及如何针对你的用例进行微调,请参阅 [Helm 官方文档](https://github.com/helm/helm-mapkubeapis#readme)。
+
+请注意,Helm 插件安装在你运行命令的机器本地。因此,请确保从同一台机器运行安装和清理。
+
+#### 安装 `helm-mapkubeapis`
+
+1. 在打算使用 `helm-mapkubeapis` 的机器上打开你的终端并安装插件:
+ ```shell
+ helm plugin install https://github.com/helm/helm-mapkubeapis
+ ```
+
+ 你将看到类似于以下的输出:
+ ```console
+ Downloading and installing helm-mapkubeapis v0.4.1 ...
+ https://github.com/helm/helm-mapkubeapis/releases/download/v0.4.1/helm-mapkubeapis_0.4.1_darwin_amd64.tar.gz
+ Installed plugin: mapkubeapis
+ ```
+
+ :::info 重要提示
+ 确保 `helm-mapkubeapis` 插件至少为 v0.4.1,因为旧版本_不_支持资源删除。
+ :::
+
+1. 验证插件是否已正确安装:
+ ```shell
+ helm mapkubeapis --help
+ ```
+
+ 你将看到类似于以下的输出:
+ ```console
+ Map release deprecated or removed Kubernetes APIs in-place
+
+ Usage:
+ mapkubeapis [flags] RELEASE
+
+ Flags:
+ --dry-run simulate a command
+ -h, --help help for mapkubeapis
+ --kube-context string name of the kubeconfig context to use
+ --kubeconfig string path to the kubeconfig file
+ --mapfile string path to the API mapping file
+ --namespace string namespace scope of the release
+ ```
+
+#### 清理损坏的版本
+
+安装 `helm-mapkubeapis` 插件后,清理升级到 Kubernetes v1.25 后损坏的版本。
+
+1. 打开你的首选终端并通过运行 `kubectl cluster-info` 确保终端已连接到所需集群。
+
+1. 运行 `helm list --all-namespaces` 列出你在集群中安装的所有版本。
+
+1. 通过运行 `helm mapkubeapis --dry-run --namespace <namespace> <release-name>` 为要清理的每个版本执行试运行。你可以通过此命令的结果了解要替换或删除哪些资源。
+
+1. 最后,在查看更改后,使用 `helm mapkubeapis --namespace <namespace> <release-name>` 执行完整运行。
+
+#### 将 Chart 升级到支持 Kubernetes v1.25 的版本
+
+清理了具有 PSP 的所有版本后,你就可以继续升级了。对于 Rancher 维护的工作负载,请按照本文档[从 Rancher 维护的应用程序和市场工作负载中删除 PodSecurityPolicies](#remove-psp-rancher-workloads) 部分中的步骤进行操作。
+如果工作负载不是由 Rancher 维护的,请参阅对应的提供商的文档。
+
+:::caution
+不要跳过此步骤。与 Kubernetes v1.25 不兼容的应用程序不能保证在清理后正常工作。
+:::
+
+## Pod 安全准入配置模板 {#psa-config-templates}
+
+Rancher 提供了 PSA 配置模板。它们是可以应用到集群的预定义安全配置。Rancher 管理员(或具有权限的人员)可以[创建、管理和编辑](./psa-config-templates.md) PSA 模板。
+
+### 受 PSA 限制的集群上的 Rancher
+
+Rancher system 命名空间也受到 PSA 模板描述的限制性安全策略的影响。你需要在分配模板后豁免 Rancher 的 system 命名空间,否则集群将无法正常运行。有关详细信息,请参阅 [Pod 安全准入 (PSA) 配置模板](./psa-config-templates.md#豁免必须的-rancher-命名空间)。
+
+有关运行 Rancher 所需的所有豁免的完整文件,请参阅此[准入配置示例](../../../reference-guides/rancher-security/psa-restricted-exemptions.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
new file mode 100644
index 00000000000..c3b4d6252f4
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/psa-config-templates.md
@@ -0,0 +1,138 @@
+---
+title: Pod 安全准入 (PSA) 配置模板
+---
+
+[Pod Security admission (PSA)](./pod-security-standards.md) 配置模板是 Rancher 自定义资源 (CRD),在 Rancher v2.7.2 及更高版本中可用。这些模板提供了可应用于集群的预定义安全配置:
+
+- `rancher-privileged`:最宽松的配置。它不限制任何 Pod 行为,允许已知的权限升级。该策略没有豁免。
+- `rancher-restricted`:严格限制的配置,遵循当前加固 pod 的最佳实践。你必须对 Rancher 组件进行[命名空间级别豁免](./pod-security-standards.md#受-psa-限制的集群上的-rancher)。
+
+## 分配 Pod 安全准入 (PSA) 配置模板
+
+你可以在创建下游集群的同时分配 PSA 模板。你还可以通过配置现有集群来添加模板。
+
+### 在集群创建期间分配模板
+
+
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,单击**创建**按钮。
+1. 选择提供商。
+1. 在**集群: 创建**页面上,转到**基本信息 > 安全**。
+1. 在 **PSA 配置模板**下拉菜单中,选择要分配的模板。
+1. 单击**创建**。
+
+### 将模板分配给现有集群
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**表中找到要更新的集群,点击 **⋮**。
+1. 选择**编辑配置**。
+1. 在 **PSA 配置模板**下拉菜单中,选择要分配的模板。
+1. 单击**保存**。
+
+### 加固集群
+
+如果选择 **rancher-restricted** 模板但不选择 **CIS 配置文件**,你将无法满足 CIS Benchmark。有关详细信息,请参阅 [RKE2 加固指南](../../../pages-for-subheaders/rke2-hardening-guide.md)。
+
+
+
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,单击**创建**按钮。
+1. 选择提供商。
+1. 在**添加集群**页面上的**集群选项**下,单击 **高级选项**。
+1. 在 **PSA 配置模板**下拉菜单中,选择要分配的模板。
+1. 单击**创建**。
+
+### 将模板分配给现有集群
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**表中找到要更新的集群,点击 **⋮**。
+1. 选择**编辑配置**。
+1. 在**编辑集群**页面上,转到**集群选项 > 高级选项**。
+1. 在 **PSA 配置模板**中,选择要分配的模板。
+1. 单击**保存**。
+
+
+
+
+## 添加或编辑 Pod 安全准入 (PSA) 配置模板
+
+如果你拥有管理员权限,则可以通过创建其他 PSA 模板或编辑现有模板来自定义安全限制和权限。
+
+:::caution
+如果编辑使用中的现有 PSA 模板,更改将应用于已分配给该模板的所有集群。
+:::
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 点击**高级选项**打开下拉菜单。
+1. 选择 **Pod 安全准入**。
+1. 找到要修改的模板,点击 **⋮**。
+1. 选择**编辑配置**来编辑模板。
+1. 完成配置编辑后,单击**保存**。
+
+### 允许非管理员用户管理 PSA 模板
+
+如果你想允许其他用户管理模板,你可以将该用户绑定到一个角色,并为该角色授予 `management.cattle.io/podsecurityadmissionconfigurationtemplates` 上的所有操作 (`"*"`)。
+
+:::caution
+绑定到上述权限的用户都能够更改使用该 PSA 模板的_所有_托管集群的限制级别,包括用户没有权限的集群。
+:::
+
+## 豁免必须的 Rancher 命名空间
+
+在默认执行限制性安全策略的 Kubernetes 集群上运行 Rancher 时,你需要[豁免以下命名空间](#豁免命名空间),否则该策略可能会阻止 Rancher system pod 正常运行。
+
+- `calico-apiserver`
+- `calico-system`
+- `cattle-alerting`
+- `cattle-csp-adapter-system`
+- `cattle-epinio-system`
+- `cattle-externalip-system`
+- `cattle-fleet-local-system`
+- `cattle-fleet-system`
+- `cattle-gatekeeper-system`
+- `cattle-global-data`
+- `cattle-global-nt`
+- `cattle-impersonation-system`
+- `cattle-istio`
+- `cattle-istio-system`
+- `cattle-logging`
+- `cattle-logging-system`
+- `cattle-monitoring-system`
+- `cattle-neuvector-system`
+- `cattle-prometheus`
+- `cattle-sriov-system`
+- `cattle-system`
+- `cattle-ui-plugin-system`
+- `cattle-windows-gmsa-system`
+- `cert-manager`
+- `cis-operator-system`
+- `fleet-default`
+- `ingress-nginx`
+- `istio-system`
+- `kube-node-lease`
+- `kube-public`
+- `kube-system`
+- `longhorn-system`
+- `rancher-alerting-drivers`
+- `security-scan`
+- `tigera-operator`
+
+Rancher、Rancher 拥有的一些 Chart 以及 RKE2 和 K3s 发行版都使用这些命名空间。列出的命名空间的一个子集已经在内置的 Rancher `rancher-restricted` 策略中被豁免,用于下游集群。有关运行 Rancher 所需的所有豁免的完整模板,请参阅此[准入配置示例](../../../reference-guides/rancher-security/psa-restricted-exemptions.md)。
+
+## 豁免命名空间
+
+如果你将 `rancher-restricted` 模板分配给集群,默认情况下,限制会在命名空间级别应用于整个集群。要在此高度受限的策略下豁免特定的命名空间,执行以下操作:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 点击**高级选项**打开下拉菜单。
+1. 选择 **Pod 安全准入**。
+1. 找到要修改的模板,点击 **⋮**。
+1. 选择**编辑配置**。
+1. 选中**豁免**下的**命名空间**复选框以编辑**命名空间**字段。
+1. 豁免命名空间后,单击**保存**。
+
+:::note
+你需要更新目标集群才能让新模板在集群中生效。要触发更新,在不更改值的情况下编辑和保存集群。
+:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md
new file mode 100644
index 00000000000..51b24396cbd
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md
@@ -0,0 +1,74 @@
+---
+title: 备份 Docker 安装的 Rancher
+---
+
+成功使用 Docker 安装 Rancher 后,我们建议你定期创建备份。最近创建的备份能让你在意外灾难发生后快速进行恢复。
+
+## 在你开始前
+
+在创建备份的过程中,你将输入一系列命令。请使用环境中的数据替换占位符。占位符用尖括号和大写字母(如 `<DATE>`)表示。以下是带有占位符的命令示例:
+
+```
+docker run --name busybox-backup-<DATE> --volumes-from rancher-data-<DATE> -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz /var/lib/rancher
+```
+
+在该命令中,`<DATE>` 是数据容器和备份创建日期的占位符(例如,`9-27-18`)。
+
+请交叉参考下方的图片和表格,了解获取此占位符数据的方法。在开始[以下步骤](#创建备份)之前,请记下或复制这些信息。
+
+终端 docker ps 命令,显示如何找到 <RANCHER_CONTAINER_TAG> 和 <RANCHER_CONTAINER_NAME>
+
+
+
+| 占位符 | 示例 | 描述 |
+| -------------------------- | -------------------------- | --------------------------------------------------------- |
+| `<RANCHER_CONTAINER_TAG>` | `v2.0.5` | 首次安装拉取的 rancher/rancher 镜像。 |
+| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | 你的 Rancher 容器的名称。 |
+| `<RANCHER_VERSION>` | `v2.0.5` | 你为其创建备份的 Rancher 版本。 |
+| `<DATE>` | `9-27-18` | 数据容器或备份的创建日期。 |
+
+
+可以通过远程连接登录到 Rancher Server 所在的主机并输入命令 `docker ps` 以查看正在运行的容器,从而获得 `<RANCHER_CONTAINER_TAG>` 和 `<RANCHER_CONTAINER_NAME>`。你还可以运行 `docker ps -a` 命令查看停止了的容器。在创建备份期间,你随时可以运行这些命令来获得帮助。
+
+## 创建备份
+
+此步骤将创建一个备份文件。如果 Rancher 遇到灾难情况,你可以使用该备份文件进行还原。
+
+
+1. 使用远程终端连接,登录到运行 Rancher Server 的节点。
+
+1. 停止当前运行 Rancher Server 的容器。将 `<RANCHER_CONTAINER_NAME>` 替换为你的 Rancher 容器的名称:
+
+ ```
+    docker stop <RANCHER_CONTAINER_NAME>
+ ```
+1. 运行以下命令,从刚才停止的 Rancher 容器创建一个数据容器。请替换命令中的占位符:
+
+ ```
+    docker create --volumes-from <RANCHER_CONTAINER_NAME> --name rancher-data-<DATE> rancher/rancher:<RANCHER_CONTAINER_TAG>
+ ```
+
+1. 从你刚刚创建的数据容器(rancher-data-<DATE>)中,创建一个备份 tar 包(rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz)。替换占位符来运行以下命令:
+
+ ```
+    docker run --name busybox-backup-<DATE> --volumes-from rancher-data-<DATE> -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz /var/lib/rancher
+ ```
+
+ **步骤结果**:屏幕上将运行命令流。
+
+1. 输入 `ls` 命令,确认备份压缩包已创建成功。压缩包的名称格式类似 `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`。
+
+1. 将备份压缩包移动到 Rancher Server 外的安全位置。然后从 Rancher Server 中删除 `rancher-data-<DATE>` 和 `busybox-backup-<DATE>` 容器。
+
+ ```
+    docker rm rancher-data-<DATE>
+    docker rm busybox-backup-<DATE>
+ ```
+
+1. 重启 Rancher Server。将 `<RANCHER_CONTAINER_NAME>` 替换为 Rancher 容器的名称:
+
+ ```
+    docker start <RANCHER_CONTAINER_NAME>
+ ```
+
+**结果**:创建了 Rancher Server 数据的备份压缩包。如果你需要恢复备份数据,请参见[恢复备份:Docker 安装](restore-docker-installed-rancher.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
new file mode 100644
index 00000000000..3ba17f6ed64
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md
@@ -0,0 +1,307 @@
+---
+title: 备份集群
+---
+
+在 Rancher UI 中,你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。
+
+Rancher 建议为所有生产集群配置定期 `etcd` 快照。此外,你还可以创建单次快照。
+
+etcd 数据库的快照会保存在 [etcd 节点](#本地备份目标)或 [S3 兼容目标](#s3-备份目标)上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。
+
+## 快照工作原理
+
+### 快照组件
+
+
+
+
+Rancher 创建快照时,快照里包括三个组件:
+
+- etcd 中的集群数据
+- Kubernetes 版本
+- `cluster.yml` 形式的集群配置
+
+由于 Kubernetes 版本现在包含在快照中,因此你可以将集群恢复到原本的 Kubernetes 版本。
+
+
+
+
+Rancher 将快照创建任务委托给下游 Kubernetes 引擎。Kubernetes 引擎创建快照时包括了三个组件:
+
+- etcd 中的集群数据
+- Kubernetes 版本
+- 集群配置
+
+由于 Kubernetes 版本包含在快照中,因此你可以将集群还原到之前的 Kubernetes 版本,同时还原 etcd 快照。
+
+
+
+
+如果你需要使用快照恢复集群,快照的多个组件允许你选择:
+
+- **仅恢复 etcd 内容**:类似于在 Rancher v2.4.0 之前版本中的使用快照恢复。
+- **恢复 etcd 和 Kubernetes 版本**:如果 Kubernetes 升级导致集群失败,并且你没有更改任何集群配置,则应使用此选项。
+- **恢复 etcd、Kubernetes 版本和集群配置**:如果你在升级时同时更改了 Kubernetes 版本和集群配置,则应使用此选项。
+
+建议你在执行配置更改或升级之前创建新快照。
+
+
+### 从 etcd 节点生成快照
+
+
+
+
+集群中的每个 etcd 节点都会检查 etcd 集群的健康状况。如果节点报告 etcd 集群是健康的,则会从中创建一个快照,并可选择上传到 S3。
+
+快照存储在 `/opt/rke/etcd-snapshots` 中。如果该目录在节点上配置为共享挂载,它将被覆盖。由于所有 etcd 节点都会上传快照并保留最后一个,因此 S3 上始终会保留最后一个上传的节点的快照。
+
+在存在多个 etcd 节点的情况下,任何快照都是在集群健康检查通过后创建的,因此这些快照可以认为是 etcd 集群中数据的有效快照。
+
+
+
+
+快照是默认启动的。
+
+快照目录默认为 `/var/lib/rancher/<runtime>/server/db/snapshots`,其中 `<runtime>` 可以是 `rke2` 或 `k3s`。
+
+在 RKE2 中,快照会存储在每个 etcd 节点上。如果你有多个 etcd 或 etcd + control plane 节点,你将拥有本地 etcd 快照的多个副本。
+
+
+
+
+### 快照命名规则
+
+
+
+
+快照的名称是自动生成的。在使用 RKE CLI 创建一次性快照时,你可以使用 `--name` 选项来指定快照的名称。
+
+Rancher 在创建 RKE 集群的快照时,快照名称是基于快照创建类型(手动快照或定期快照)和目标(快照是保存在本地还是上传到 S3)决定的。命名规则如下:
+
+- `m` 代表手动
+- `r` 代表定期
+- `l` 代表本地
+- `s` 代表 S3
+
+快照名称示例如下:
+
+- c-9dmxz-rl-8b2cx
+- c-9dmxz-ml-kr56m
+- c-9dmxz-ms-t6bjb
+- c-9dmxz-rs-8gxc8
+
+
+
+
+快照的名称是自动生成的。使用 RKE2 或 K3s CLI 创建一次性快照时,`--name` 选项可用于覆盖快照的基本名称。
+
+Rancher 在创建 RKE2 或 K3s 集群的快照时,快照名称是基于快照创建类型(手动快照或定期快照)和目标(快照是保存在本地还是上传到 S3)决定的。命名规则如下:
+
+`<name>-<node>-<timestamp>`
+
+`<name>`:`--name` 设置的基本名称,可以是以下之一:
+
+- `etcd-snapshot`:位于定期快照前面
+- `on-demand`:位于手动按需快照之前
+
+`<node>`:创建快照的节点的名称。
+
+`<timestamp>`:快照创建日期的 unix 时间戳。
+
+快照名称示例如下:
+
+- `on-demand-my-super-rancher-k8s-node1-1652288934`
+- `on-demand-my-super-rancher-k8s-node2-1652288936`
+- `etcd-snapshot-my-super-rancher-k8s-node1-1652289945`
+- `etcd-snapshot-my-super-rancher-k8s-node2-1652289948`
+
+
+
+
+### 从快照恢复的工作原理
+
+
+
+
+在恢复时会发生以下过程:
+
+1. 如果配置了 S3,则从 S3 检索快照。
+2. 如果快照压缩了,则将快照解压缩。
+3. 集群中的一个 etcd 节点会将该快照文件提供给其他节点。
+4. 其他 etcd 节点会下载快照并验证校验和,以便都能使用相同的快照进行恢复。
+5. 集群已恢复,恢复后的操作将在集群中完成。
+
+
+
+
+在还原时,Rancher 会提供几组执行还原的计划。期间将包括以下阶段:
+
+- Started
+- Shutdown
+- Restore
+- RestartCluster
+- Finished
+
+如果 etcd 快照还原失败,阶段将设置为 `Failed`。
+
+1. 收到 etcd 快照还原请求后,根据 `restoreRKEConfig` 协调集群配置和 Kubernetes 版本。
+1. 该阶段设置为 `Started`。
+1. 该阶段设置为 `Shutdown`,并使用运行 `killall.sh` 脚本的计划来关闭整个集群。一个新的初始节点会被选举出来。如果还原的快照是本地快照,则选择该快照所在的节点作为初始节点。如果使用 S3 还原快照,将使用现有的初始节点。
+1. 该阶段设置为 `Restore`,并且快照将还原到初始节点上。
+1. 该阶段设置为 `RestartCluster`,集群将重启并重新加入到具有新还原的快照信息的新初始节点。
+1. 该阶段设置为 `Finished`,集群被视为已成功还原。`cattle-cluster-agent` 将重新连接,集群将完成协调。
+
+
+
+
+## 配置定期快照
+
+
+
+
+选择创建定期快照的频率以及要保留的快照数量。时间的单位是小时。用户可以使用时间戳快照进行时间点恢复。
+
+默认情况下,[Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)会配置为创建定期快照(保存到本地磁盘)。为防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
+
+在集群配置或编辑集群期间,可以在**集群选项**的高级部分中找到快照的配置。点击**显示高级选项**。
+
+在集群的**高级选项**中可以配置以下选项:
+
+| 选项 | 描述 | 默认值 |
+| --- | ---| --- |
+| etcd 快照备份目标 | 选择要保存快照的位置。可以是本地或 S3 | 本地 |
+| 启用定期 etcd 快照 | 启用/禁用定期快照 | 是 |
+| 定期 etcd 快照的创建周期 | 定期快照之间的间隔(以小时为单位) | 12 小时 |
+| 定期 etcd 快照的保留数量 | 要保留的快照数量 | 6 |
+
+
+
+
+设置创建定期快照的方式以及要保留的快照数量。该计划采用传统的 Cron 格式。保留策略规定了在每个节点上要保留的匹配名称的快照数量。
+
+默认情况下,[Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)从凌晨 12 点开始每 5 小时创建一次定期快照(保存到本地磁盘)。为了防止本地磁盘故障,建议使用 [S3 目标](#s3-备份目标)或复制磁盘上的路径。
+
+在集群配置或编辑集群期间,你可以在**集群配置**下找到快照配置。单击 **etcd**。
+
+| 选项 | 描述 | 默认值 |
+| --- | ---| --- |
+| 启用定期 etcd 快照 | 启用/禁用定期快照 | 是 |
+| 定期 etcd 快照的创建周期 | 定期快照的 Cron 计划 | `0 */5 * * *` |
+| 定期 etcd 快照的保留数量 | 要保留的快照数量 | 5 |
+
+
+
+
+## 单次快照
+
+
+
+
+除了定期快照之外,你可能还想创建“一次性”快照。例如,在升级集群的 Kubernetes 版本之前,最好备份集群的状态以防止升级失败。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,导航到要在其中创建一次性快照的集群。
+1. 单击 **⋮ > 拍摄快照**。
+
+
+
+
+除了定期快照之外,你可能还想创建“一次性”快照。例如,在升级集群的 Kubernetes 版本之前,最好备份集群的状态以防止升级失败。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,导航到要在其中创建一次性快照的集群。
+1. 导航至`快照`选项卡,然后单击`立即创建快照`。
+
+### 创建一次性快照的工作原理
+
+在创建一次性快照时,Rancher 会传递几组计划来执行快照创建。期间将包括以下阶段:
+
+- Started
+- RestartCluster
+- Finished
+
+如果 etcd 快照创建失败,阶段将设置为 `Failed`。
+
+1. 收到 etcd 快照创建请求。
+1. 该阶段设置为 `Started`。集群中的所有 etcd 节点都会根据集群配置收到创建 etcd 快照的计划。
+1. 该阶段设置为 `RestartCluster`,并且每个 etcd 节点上的计划都将重置为 etcd 节点的原始计划。
+1. 该阶段设置为 `Finished`。
+
+
+
+
+**结果**:根据你的[快照备份目标](#快照备份目标)创建一次性快照,并将其保存在选定的备份目标中。
+
+## 快照备份目标
+
+Rancher 支持两种不同的备份目标:
+
+- [本地目标](#本地备份目标)
+- [S3 目标](#s3-备份目标)
+
+### 本地备份目标
+
+
+
+
+默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会在本地自动保存到 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中 etcd 节点的 `/opt/rke/etcd-snapshots` 中。所有定期快照都是按照配置的时间间隔创建的。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
+
+
+
+
+默认情况下会选择 `local` 备份目标。此选项的好处是不需要进行外部配置。快照会自动保存到 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)中的本地 etcd 节点上的 `/var/lib/rancher/<runtime>/server/db/snapshots` 中,其中 `<runtime>` 可以是 `k3s` 或 `rke2`。所有定期快照均按照 Cron 计划进行。使用 `local` 备份目标的缺点是,如果发生全面灾难并且丢失 _所有_ etcd 节点时,则无法恢复集群。
+
+
+
+
+### S3 备份目标
+
+我们建议你使用 `S3` 备份目标。你可以将快照存储在外部 S3 兼容的后端上。由于快照不存储在本地,因此即使丢失所有 etcd 节点,你仍然可以还原集群。
+
+虽然 `S3` 比本地备份具有优势,但它需要额外的配置。
+
+:::caution
+
+如果你使用 S3 备份目标,请确保每个集群都有自己的存储桶或文件夹。Rancher 将使用集群配置的 S3 存储桶或文件夹中的可用快照来填充快照信息。
+
+:::
+
+| 选项 | 描述 | 必填 |
+|---|---|---|
+| S3 存储桶名称 | 用于存储备份的 S3 存储桶名称 | * |
+| S3 区域 | 备份存储桶的 S3 区域 | |
+| S3 区域端点 | 备份存储桶的 S3 区域端点 | * |
+| S3 访问密钥 | 有权访问备份存储桶的 S3 访问密钥 | * |
+| S3 密文密钥 | 有权访问备份存储桶的 S3 密文密钥 | * |
+| 自定义 CA 证书 | 用于访问私有 S3 后端的自定义证书 |
+
+### 为 S3 使用自定义 CA 证书
+
+备份快照可以存储在自定义 `S3` 备份中,例如 [minio](https://min.io/)。如果 S3 后端使用自签名或自定义证书,请使用`自定义 CA 证书`选项来提供自定义证书,从而连接到 S3 后端。
+
+### 在 S3 中存储快照的 IAM 支持
+
+除了使用 API 凭证之外,`S3` 备份目标还支持对 AWS API 使用 IAM 身份验证。IAM 角色会授予应用在对 S3 存储进行 API 调用时的临时权限。要使用 IAM 身份验证,必须满足以下要求:
+
+- 集群 etcd 节点必须具有实例角色,该角色具有对指定备份存储桶的读/写访问权限。
+- 集群 etcd 节点必须对指定的 S3 端点具有网络访问权限。
+- Rancher Server worker 节点必须具有实例角色,该实例角色具有对指定备份存储桶的读/写访问权限。
+- Rancher Server worker 节点必须对指定的 S3 端点具有网络访问权限。
+
+要授予应用对 S3 的访问权限,请参阅[使用 IAM 角色向在 Amazon EC2 实例上运行的应用授予权限](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html)的 AWS 文档。
+
+## 查看可用快照
+
+Rancher UI 中提供了集群所有可用快照的列表:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面中,转到要查看快照的集群并单击其名称。
+1. 单击**快照**选项卡来查看已保存快照的列表。这些快照包括创建时间的时间戳。
+
+## 安全时间戳(RKE)
+
+快照文件带有时间戳,从而简化使用外部工具和脚本处理文件的过程。但在某些与 S3 兼容的后端中,这些时间戳无法使用。
+
+添加了选项 `safe_timestamp` 以支持兼容的文件名。当此标志设置为 `true` 时,快照文件名时间戳中的所有特殊字符都将被替换。
+
+此选项不能直接在 UI 中使用,只能通过`以 YAML 文件编辑`使用。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
new file mode 100644
index 00000000000..cf023f56e0a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md
@@ -0,0 +1,92 @@
+---
+title: 备份 Rancher
+---
+
+在本节中,你将学习如何备份运行在任何 Kubernetes 集群上的 Rancher。要备份通过 Docker 安装的 Rancher,请参见[单节点备份](back-up-docker-installed-rancher.md)。
+
+`backup-restore` operator 需要安装在 local 集群上,并且只对 Rancher 应用进行备份。备份和恢复操作仅在本地 Kubernetes 集群中执行。
+
+请知悉,`rancher-backup` operator 的 2.x.x 版本用于 Rancher v2.6.x。
+
+:::caution
+
+当把备份恢复到一个新的 Rancher 设置中时,新设置的版本应该与备份的版本相同。在恢复备份时还应考虑 Kubernetes 的版本,因为集群中支持的 apiVersion 和备份文件中的 apiVersion 可能不同。
+
+:::
+
+### 先决条件
+
+Rancher 必须是 2.5.0 或更高版本。
+
+请参见[此处](migrate-rancher-to-new-cluster.md#2-使用-restore-自定义资源来还原备份)获取在 Rancher 2.6.3 中将现有备份文件恢复到 v1.22 集群的帮助。
+
+### 1. 安装 Rancher Backup Operator
+
+备份存储位置是 operator 级别的设置,所以需要在安装或升级 `rancher backup` 应用时进行配置。
+
+备份文件的格式是 `.tar.gz`。这些文件可以推送到 S3 或 Minio,也可以存储在一个持久卷中。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到 `local` 集群并单击 **Explore**。Rancher Server 运行在 `local` 集群中。
+1. 单击 **Apps > Charts**。
+1. 点击 **Rancher 备份**。
+1. 单击**安装**。
+1. 配置默认存储位置。如需获取帮助,请参见[存储配置](../../../reference-guides/backup-restore-configuration/storage-configuration.md)。
+1. 单击**安装**。
+
+:::note
+
+使用 `backup-restore` operator 执行恢复后,Fleet 中会出现一个已知问题:用于 `clientSecretName` 和 `helmSecretName` 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../deploy-apps-across-clusters/fleet.md#故障排除)获得解决方法。
+
+:::
+
+### 2. 执行备份
+
+要执行备份,必须创建 Backup 类型的自定义资源。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到 `local` 集群并单击 **Explore**。
+1. 在左侧导航栏中,点击 **Rancher 备份 > 备份**。
+1. 单击**创建**。
+1. 使用表单或 YAML 编辑器创建 Backup。
+1. 要使用该表单配置 Backup 详细信息,请单击**创建**,然后参见[配置参考](../../../reference-guides/backup-restore-configuration/backup-configuration.md)和[示例](../../../reference-guides/backup-restore-configuration/examples.md#备份)进行操作。
+1. 要使用 YAML 编辑器,单击**创建 > 使用 YAML 文件创建**。输入 Backup YAML。这个示例 Backup 自定义资源将在 S3 中创建加密的定期备份。这个应用使用 `credentialSecretNamespace` 值来确定在哪里寻找 S3 备份的密文:
+
+ ```yaml
+ apiVersion: resources.cattle.io/v1
+ kind: Backup
+ metadata:
+ name: s3-recurring-backup
+ spec:
+ storageLocation:
+ s3:
+ credentialSecretName: s3-creds
+ credentialSecretNamespace: default
+ bucketName: rancher-backups
+ folder: rancher
+ region: us-west-2
+ endpoint: s3.us-west-2.amazonaws.com
+ resourceSetName: rancher-resource-set
+ encryptionConfigSecretName: encryptionconfig
+ schedule: "@every 1h"
+ retentionCount: 10
+ ```
+
+ :::note
+
+ 使用 YAML 编辑器创建 Backup 资源时,`resourceSetName` 必须设置为 `rancher-resource-set`。
+
+ :::
+
+ 如需获得配置 Backup 的帮助,请参见[配置参考](../../../reference-guides/backup-restore-configuration/backup-configuration.md)和[示例](../../../reference-guides/backup-restore-configuration/examples.md#备份)。
+
+ :::caution
+
+ `rancher-backup` operator 不保存 `EncryptionConfiguration` 文件。创建加密备份时,必须保存 `EncryptionConfiguration` 文件的内容,而且在使用备份还原时必须使用同一个文件。
+
+ :::
+
+1. 单击**创建**。
+
+**结果**:备份文件创建在 Backup 自定义资源中配置的存储位置中。执行还原时使用该文件的名称。
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md
new file mode 100644
index 00000000000..03b4a704185
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-restore-usage-guide.md
@@ -0,0 +1,129 @@
+---
+title: 备份恢复使用指南
+---
+
+Rancher Backups Chart 是我们的灾难恢复和迁移解决方案。此 Chart 用于备份 Kubernetes 资源并将其保存到各种持久存储位置。
+
+这个 Chart 是一个非常简单的工具,适用于 Rancher 生态系统的许多不同领域。但是,也因此出现了未记录功能的边缘用例。本文档旨在强调 Rancher Backups 正确的用法,并讨论我们遇到的一些边缘情况。
+
+## 功能概述
+
+### 备份
+
+该 Operator 将 Chart 中的 resourceSet 捕获的所有资源收集为内存中的非结构化对象。收集资源后,资源的 tar 包将保存为 JSON 格式的清单集合,然后上传到用户定义的对象存储。该备份可以按重复计划进行,也可以进行加密。由于某些资源是敏感的,并且值以未加密的明文形式存储,因此此加密选项很重要。
+
+有关配置备份的选项(包括加密),请参阅[备份配置文档](../../../reference-guides/backup-restore-configuration/backup-configuration.md)。
+
+:::note
+
+如[备份 Rancher 文档](./back-up-rancher.md)所述,你必须手动保存加密配置文件的内容,因为 Operator **不会**备份它。
+
+:::
+
+### 还原
+
+有两种主要的还原场景:还原正在运行 Rancher 的集群以及还原新集群。只有将备份还原到该备份的源集群,且在还原过程中启用了 [`prune` 选项](../../../reference-guides/backup-restore-configuration/restore-configuration.md#还原过程中修剪)时,你才能还原正在运行 Rancher 的集群。还原具有与备份类似的输入。它需要备份文件名、encryptionConfigSecret 名称和存储位置。
+
+资源按以下顺序还原:
+
+1. 自定义资源定义(CRD)
+2. 集群范围资源
+3. 命名空间资源
+
+有关配置还原的选项,请参阅[还原配置文档](../../../reference-guides/backup-restore-configuration/restore-configuration.md)。
+
+### 资源集
+
+ResourceSet 确定了 backup-restore-operator 在备份中收集哪些资源。它是一组 ResourceSelector,使用键/值对匹配、正则表达式匹配或 Kubernetes 客户端 labelSelector 来定义选择要求。
+
+以下是可用于 resourceSelector 的字段:
+
+- apiVersion
+- excludeKinds
+- excludeResourceNameRegexp
+- kinds
+- kindsRegexp
+- labelSelectors
+- namespaceRegexp
+- namespaces
+- resourceNameRegexp
+- resourceNames
+
+Rancher Backups Chart 包含了一个[默认 resourceSet](https://github.com/rancher/backup-restore-operator/tree/release/v3.0/charts/rancher-backup/files/default-resourceset-contents),它是安装 Chart 时附加到一个大型 resourceSet 的 YAML 文件组合。文件顺序并不重要。不同版本的 resourceSet 可能有所不同。
+
+:::caution
+
+如果你希望编辑 resourceSet,请在安装 Chart 之前进行**编辑**。
+
+:::
+
+## 正确使用
+
+本节概述了如何根据用例正确使用 Rancher Backups Chart。
+
+### 所有案例
+
+- Rancher Backups 必须安装在 local 集群上。
+ - 注意:Rancher Backups 只会处理安装了它的集群。它可能会还原 local 集群上的集群资源,但不会联系或备份下游集群。
+- 要还原的 Rancher 版本必须与备份中的 Rancher 版本匹配。
+- 由于你可能需要还原已过时的资源(要还原的 Kubernetes 版本已弃用的资源),因此你需要考虑 Kubernetes 版本。
+
+### 备份
+
+- 用户生成的某些资源不会被备份,除非这些资源可以被默认 resourceSet 捕获,或者 resourceSet 已被更改为捕获这些资源。
+  - 我们提供了一个 `resources.cattle.io/backup:true` 标签。将该标签添加到任何命名空间中的 Secret 后,该 Secret 将被备份。
+- 备份是不可改变的
+- 仅备份 local 集群
+
+### 还原
+
+- 还原是指将备份还原到原来的集群。可以在安装了 Rancher 的情况下进行(**必须启用 prune**),也可以在未安装 Rancher 的情况下进行(无特殊说明)。
+- 还原时需要注意的一件事是,你可能需要“擦除”集群中的所有 Rancher 资源。你可以通过将 [Rancher cleanup script](https://github.com/rancher/rancher-cleanup) 脚本作为 job 部署到集群来完成这点。这样,你可以再次安装 Rancher Backups 并还原到全新的集群。
+ - 确保使用了 kubectl 来部署脚本。
+
+### 迁移
+
+由于我们要还原到不同的集群,因此对应的迁移有一些细微差别。以下是需要记住但又容易被忘记的事情。
+
+- 迁移时 Rancher 域必须相同。换言之,你旧集群的域名现在必须指向新集群。
+- Rancher **不应该**已运行在你要迁移到的集群,这可能会导致 Rancher 备份和某些 Rancher 服务出现许多问题。
+- **还原备份后**,安装与备份**相同**的 Rancher 版本。
+- 在其他 Kubernetes 版本上配置新集群可能会出现各种不受支持的情况,这是因为可用的 Kubernetes API 可能与你备份的 API 不同。这可能会导致已弃用的资源被恢复,从而导致问题。
+- 在迁移期间**不要**执行任何升级操作。
+
+## 边缘案例和不当使用
+
+以下是 Rancher Backups 的一些**不当**使用示例。
+
+### 升级
+
+- 使用 Rancher Backups 来升级 Rancher 版本不是一个有效用法。推荐的做法是:先备份当前版本,然后按照[说明](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md)升级你的 Rancher 实例,在升级完成后再进行**另一个**备份。这样,如果升级失败,你就有一个可以用来还原的备份,而第二个备份将能用于还原到升级后的 Rancher 版本。
+- 使用 Rancher Backups 来升级 Kubernetes 版本也不是一个有效用法。由于 Kubernetes API 以及可用资源与版本相关,因此使用备份还原的方法来进行升级可能会导致资源集不对齐的问题,这些资源可能已被弃用、不受支持或已更新。升级集群版本的方式取决于其配置方式,但建议使用上述的流程(备份、升级、备份)。
+
+### ResourceSet
+
+- 由于不同团队的资源和服务会不断发展,开发人员应该注意是否需要向默认 resourceSet 添加或删除新资源。
+- Rancher Backups 仅备份默认 resourceSet 捕获的内容(除非进行编辑)。我们为用户创建的 Secret 添加了特定标签,无论 Secret 的名称是什么,无论它属于哪个命名空间,具有该标签的 Secret 都会被备份(请参阅[备份的正确用法](#备份))。
+
+### 下游集群
+
+- Rancher Backups **仅**备份 local 集群上的 Kubernetes 资源。换言之,除了存在于 local 集群中的资源,下游集群**不会**被触及或备份。下游集群的更新和通信由 rancher-agent 和 rancher-webhook 负责。
+
+### 还原已删除的资源
+
+- 有些资源会产生外部结果,例如会配置下游集群。删除下游集群并还原 local 集群上的集群资源**不会**导致 Rancher 重新配置所述集群。某些资源可能无法通过还原回到可用状态。
+- “还原已删除的集群”**不是**受支持的功能。涉及下游集群时,无论集群是配置的还是导入的,删除集群都会执行一系列清理任务,导致我们无法还原已删除的集群。配置的集群节点以及与 Rancher 相关的配置资源将被销毁,而导入的集群的 Rancher Agent 以及与 local 集群注册相关的其他资源/服务可能会被销毁。
+
+:::caution
+
+尝试删除和还原下游集群可能会导致 Rancher、Rancher Backups、rancher-webhook、Fleet 等出现各种问题。因此,我们不建议你这样做。
+
+:::
+
+### Fleet、Harvester 和其他服务
+
+由 Rancher Backups 支持的其他服务会经常发生变化和发展。发生这种情况时,他们的资源和备份需求也可能会发生变化。有些资源可能根本不需要备份。团队需要在开发过程中考虑这一点,并评估相关 resourceSet 是否能正确捕获正确的资源集来还原其服务。
+
+## 结论
+
+Rancher Backups 是一个非常有用的工具,但它的使用范围和使用目的是有限的。为了避免出现问题,请遵循本文所述的流程来确保 Chart 能正确运作。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md
similarity index 58%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/backup-restore-and-disaster-recovery.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md
index b7460369da2..d99c8f91f92 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/backup-restore-and-disaster-recovery.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/backup-restore-and-disaster-recovery.md
@@ -3,16 +3,20 @@ title: 备份和灾难恢复
keywords: [rancher 备份还原, rancher 备份与恢复, 备份恢复 rancher, rancher 备份与恢复 rancher]
---
+
+
+
+
在本节中,你将学习如何创建 Rancher 的备份,如何从备份中恢复 Rancher,以及如何将 Rancher 迁移到新的 Kubernetes 集群。
`rancher-backup` operator 可以用来备份和恢复任何 Kubernetes 集群上的 Rancher。这个应用是一个 Helm Chart,可以通过 Rancher 的 **Apps** 页面或使用 Helm CLI 部署。你可以访问[本页面](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup)获取 `rancher-backup` Helm Chart。
-`backup-restore` operator 需要安装在 local 集群上,并且只对 Rancher 应用进行备份。备份和恢复操作仅在本地 Kubernetes 集群中执行。
+backup-restore operator 需要安装在 local 集群上,并且只对 Rancher 应用进行备份。备份和恢复操作仅在本地 Kubernetes 集群中执行。
## 备份和恢复 Docker 安装的 Rancher
-对于使用 Docker 安装的 Rancher,请参见[备份](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md)和[恢复](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md)对 Rancher 进行备份和恢复。
+对于使用 Docker 安装的 Rancher,请参见[备份](back-up-docker-installed-rancher.md)和[恢复](restore-docker-installed-rancher.md)对 Rancher 进行备份和恢复。
## 备份和恢复原理
@@ -26,15 +30,15 @@ ResourceSet 定义了需要备份哪些 Kubernetes 资源。由于备份 Rancher
在创建 Backup 自定义资源时,`rancher-backup` operator 调用 `kube-apiserver` 来获取 Backup 自定义资源引用的 ResourceSet(即预设的 `rancher-resource-set`)资源。
-然后,operator 以 `.tar.gz` 格式创建备份文件,并将其存储在 Backup 资源中配置的位置。
+然后,operator 以 .tar.gz 格式创建备份文件,并将其存储在 Backup 资源中配置的位置。
-在创建 Restore 自定义资源时,operator 访问 Restore 指定的 `tar.gz` 备份文件,并从该文件恢复应用。
+在创建 Restore 自定义资源时,operator 访问 Restore 指定的 tar.gz 备份文件,并从该文件恢复应用。
你可以使用 Rancher UI 或 `kubectl apply` 来创建 Backup 和 Restore 自定义资源。
:::note
-请参见[此处](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md#2-使用-restore-自定义资源来还原备份)获取在 Rancher 2.6.3 中将现有备份文件恢复到 v1.22 集群的帮助。
+请参见[此处](migrate-rancher-to-new-cluster.md#2-使用-restore-自定义资源来还原备份)获取在 Rancher 2.6.3 中将现有备份文件恢复到 v1.22 集群的帮助。
:::
@@ -44,7 +48,7 @@ ResourceSet 定义了需要备份哪些 Kubernetes 资源。由于备份 Rancher
:::note
-使用 `backup-restore` operator 执行恢复后,Fleet 中会出现一个已知问题:用于 `clientSecretName` 和 `helmSecretName` 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](./fleet-gitops-at-scale.md#故障排除)获得解决方法。
+使用 backup-restore-operator 执行恢复后,Fleet 中会出现一个已知问题:用于 clientSecretName 和 helmSecretName 的密文不包含在 Fleet 的 Git 仓库中。请参见[此处](../../../integrations-in-rancher/fleet/overview.md#故障排除)获得解决方法。
:::
@@ -55,7 +59,7 @@ ResourceSet 定义了需要备份哪些 Kubernetes 资源。由于备份 Rancher
1. 在左侧导航栏中,单击 **Apps > Charts**。
1. 点击 **Rancher 备份**。
1. 单击**安装**。
-1. 可选:配置默认存储位置。如需获取帮助,请参见[配置](../reference-guides/backup-restore-configuration/storage-configuration.md)。
+1. 可选:配置默认存储位置。如需获取帮助,请参见[配置](../../../reference-guides/backup-restore-configuration/storage-configuration.md)。
1. 单击**安装**。
**结果**:`rancher-backup` operator 已安装。
@@ -75,22 +79,22 @@ ResourceSet 定义了需要备份哪些 Kubernetes 资源。由于备份 Rancher
## 备份 Rancher
-备份是通过创建 Backup 自定义资源实现的。如需查看教程,请参见[本页面](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md)。
+备份是通过创建 Backup 自定义资源实现的。如需查看教程,请参见[本页面](back-up-rancher.md)。
## 还原 Rancher
-还原是通过创建 Restore 自定义资源实现的。如需查看教程,请参见[本页面](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md)。
+还原是通过创建 Restore 自定义资源实现的。如需查看教程,请参见[本页面](restore-rancher.md)。
## 将 Rancher 迁移到新集群
-你可以按照[这些步骤](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)执行迁移。
+你可以按照[这些步骤](migrate-rancher-to-new-cluster.md)执行迁移。
## 默认存储位置配置
-配置一个用于保存所有备份的默认存储位置。你可以选择对每个备份进行覆盖,但仅限于使用 S3 或 Minio 对象存储。
+配置一个用于保存备份文件的默认存储位置。有多种选项可以配置,例如将与 S3 兼容的对象存储作为单个备份文件的存储后端,或在安装 `backup-restore-operator` Helm Chart 时选择一个现有的 `StorageClass`。你还可以选择在每次备份时覆盖配置的存储位置,但这仅限于使用与 S3 兼容的对象存储或 Minio 对象存储。
-如需了解各个选项的配置,请参见[本页面](../reference-guides/backup-restore-configuration/storage-configuration.md)。
+如需了解各个选项的配置,请参见[本页面](../../../reference-guides/backup-restore-configuration/storage-configuration.md)。
-### rancher-backup Helm Chart 的示例 values.yaml
+### 示例 YAML 文件:Rancher Backup Helm Chart
-当使用 Helm CLI 安装时,可以使用示例 [values.yaml 文件](../reference-guides/backup-restore-configuration/storage-configuration.md#rancher-backup-helm-chart-的示例-valuesyaml) 来配置 `rancher-backup` operator。
+当使用 Helm CLI 安装时,可以使用示例 [values.yaml 文件](../../../reference-guides/backup-restore-configuration/storage-configuration.md#rancher-backup-helm-chart-的示例-valuesyaml) 来配置 `rancher-backup-operator`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
new file mode 100644
index 00000000000..5aa5ac6e523
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md
@@ -0,0 +1,184 @@
+---
+title: 将 Rancher 迁移到新集群
+---
+
+如果你要将 Rancher 迁移到一个新的 Kubernetes 集群,先不要在新集群上安装 Rancher。这是因为如果将 Rancher 还原到已安装 Rancher 的新集群,可能会导致问题。
+
+### 先决条件
+
+以下说明假设你已经完成[备份创建](back-up-rancher.md),并且已经安装了用于部署 Rancher 的新 Kubernetes 集群。
+
+:::caution
+
+你需要使用与第一个集群中设置的 Server URL 相同的主机名。否则,下游集群会在 UI 的管理页面显示为不可用,并且你不能点击集群内或集群的 **Explore** 按钮。
+
+:::
+
+Rancher 必须是 2.5.0 或更高版本。
+
+Rancher 可以安装到任意 Kubernetes 集群上,包括托管的 Kubernetes 集群(如 Amazon EKS 集群)。如需获取安装 Kubernetes 的帮助,请参见 Kubernetes 发行版的文档。你也可以使用以下 Rancher 的 Kubernetes 发行版:
+
+- [RKE Kubernetes 安装文档](https://rancher.com/docs/rke/latest/en/installation/)
+- [K3s Kubernetes 安装文档](https://rancher.com/docs/k3s/latest/en/installation/)
+
+### 1. 安装 rancher-backup Helm Chart
+安装 [rancher-backup chart](https://github.com/rancher/backup-restore-operator/tags),请使用 2.x.x 主要版本内的版本:
+
+1. 添加 helm 仓库:
+
+ ```bash
+ helm repo add rancher-charts https://charts.rancher.io
+ helm repo update
+ ```
+
+1. 使用 2.x.x rancher-backup 版本设置 `CHART_VERSION` 变量:
+ ```bash
+ helm search repo --versions rancher-charts/rancher-backup
+ CHART_VERSION=<2.x.x>
+ ```
+
+1. 安装 Chart:
+ ```bash
+ helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace --version $CHART_VERSION
+ helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system --version $CHART_VERSION
+ ```
+
+ :::note
+
+ 以上假设你的环境具有到 Docker Hub 的出站连接。
+
+ 对于**离线环境**,在安装 rancher-backup Helm Chart 时,使用下面的 Helm 值从你的私有镜像仓库中拉取 `backup-restore-operator` 镜像。
+
+ ```bash
+   --set image.repository=$REGISTRY/rancher/backup-restore-operator
+ ```
+
+ :::
+
+### 2. 使用 Restore 自定义资源来还原备份
+
+:::note 重要提示:
+
+Kubernetes v1.22 是 Rancher 2.6.3 的实验功能,不支持使用 apiVersion `apiextensions.k8s.io/v1beta1`来还原包含 CRD 文件的备份文件。在 v1.22 中,`rancher-backup` 应用的默认 `resourceSet` 只收集使用 `apiextensions.k8s.io/v1` 的 CRD。你可以通过下面两种方法解决这个问题。
+
+1. 使用 apiVersion v1 来更新默认 `resourceSet`,从而收集 CRD。
+1. 使用 `apiextensions.k8s.io/v1` 作为替代,来更新默认 `resourceSet` 和客户端,从而在内部使用新的 API。
+
+ :::note
+
+ 在为 v1.22 版本制作或恢复备份时,Rancher 版本和本地集群的 Kubernetes 版本应该是一样的。由于集群中支持的 apiVersion 和备份文件中的 apiVersion 可能不同,因此在还原备份时请考虑 Kubernetes 的版本。
+
+ :::
+
+:::
+
+1. 在使用 S3 对象存储作为需要使用凭证的还原的备份源时,请在此集群中创建一个 `Secret` 对象以添加 S3 凭证。Secret 数据必须有两个密钥,分别是包含 S3 凭证的 `accessKey` 和 `secretKey`。
+
+ 你可以在任何命名空间中创建 Secret,本示例使用 default 命名空间。
+
+ ```bash
+ kubectl create secret generic s3-creds \
+   --from-literal=accessKey=<ACCESS_KEY> \
+   --from-literal=secretKey=<SECRET_KEY>
+ ```
+
+ :::note
+
+ 在上面的命令中添加你的 Access Key 和 Secret Key 作为 `accessKey` 和 `secretKey` 的值。
+
+ :::
+
+1. 创建一个 `Restore` 对象:
+
+ 在迁移期间,`prune` 必须设置为 `false`。请参见下面的示例:
+
+ ```yaml
+ # restore-migration.yaml
+ apiVersion: resources.cattle.io/v1
+ kind: Restore
+ metadata:
+ name: restore-migration
+ spec:
+ backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz
+ // highlight-next-line
+ prune: false
+ // highlight-next-line
+ encryptionConfigSecretName: encryptionconfig
+ storageLocation:
+ s3:
+ credentialSecretName: s3-creds
+ credentialSecretNamespace: default
+ bucketName: backup-test
+ folder: ecm1
+ region: us-west-2
+ endpoint: s3.us-west-2.amazonaws.com
+ ```
+
+ :::note 重要提示:
+
+ 只有在创建备份时启用了加密功能时,才需要设置 `encryptionConfigSecretName` 字段。
+
+ 如果适用,请提供包含加密配置文件的 `Secret` 对象的名称。如果你只有加密配置文件,但没有在此集群中创建 Secret,请按照以下步骤创建 Secret。
+
+ 1. 创建[加密配置文件](../../../reference-guides/backup-restore-configuration/backup-configuration.md#加密)
+ 1. 下面的命令使用一个名为 `encryption-provider-config.yaml` 的文件,使用了 `--from-file` 标志。将 `EncryptionConfiguration` 保存到名为 `encryption-provider-config.yaml` 的文件中之后,运行以下命令:
+
+ ```bash
+ kubectl create secret generic encryptionconfig \
+ --from-file=./encryption-provider-config.yaml \
+ -n cattle-resources-system
+ ```
+
+ :::
+
+1. 应用清单,并监控 Restore 的状态:
+ 1. 应用 `Restore` 对象资源:
+
+ ```bash
+ kubectl apply -f restore-migration.yaml
+ ```
+
+ 1. 观察 Restore 的状态:
+ ```bash
+ kubectl get restore
+ ```
+
+ 1. 查看恢复日志:
+ ```bash
+ kubectl logs -n cattle-resources-system --tail 100 -f -l app.kubernetes.io/instance=rancher-backup
+ ```
+
+ 1. Restore 资源的状态变成 `Completed` 后,你可以继续安装 cert-manager 和 Rancher。
+
+### 3. 安装 cert-manager
+
+按照[安装 cert-manager](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#4-安装-cert-manager)的步骤,在 Kubernetes 上安装 cert-manager。
+
+### 4. 使用 Helm 安装 Rancher
+
+使用与第一个集群上使用的相同版本的 Helm 来安装 Rancher:
+
+对于 Kubernetes v1.25 或更高版本,使用 Rancher v2.7.2-v2.7.4 时,将 `global.cattle.psp.enabled` 设置为 `false`。对于 Rancher v2.7.5 及更高版本来说,这不是必需的,但你仍然可以手动设置该选项。
+
+```bash
+helm install rancher rancher-latest/rancher \
+ --namespace cattle-system \
+  --set hostname=<与第一个 Rancher 相同的主机名> \
+ --version x.y.z
+```
+
+:::note
+
+如果原始的 Rancher 环境正在运行,你可以使用 kubeconfig 为原始环境收集当前值:
+
+```bash
+helm get values rancher -n cattle-system -o yaml > rancher-values.yaml
+```
+
+你可以使用 `rancher-values.yaml` 文件来复用这些值。确保将 kubeconfig 切换到新的 Rancher 环境。
+
+```bash
+helm install rancher rancher-latest/rancher -n cattle-system -f rancher-values.yaml --version x.y.z
+```
+
+:::
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md
new file mode 100644
index 00000000000..b491321f2bf
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md
@@ -0,0 +1,72 @@
+---
+title: 恢复备份 - Docker 安装
+---
+
+如果遇到灾难情况,你可以将 Rancher Server 恢复到最新的备份。
+
+## 在你开始前
+
+在恢复备份的过程中,你将输入一系列命令。请使用环境中的数据替换占位符。占位符用尖括号和大写字母(如 `<EXAMPLE>`)表示。以下是带有占位符的命令示例:
+
+```
+docker run --volumes-from <RANCHER_CONTAINER_NAME> -v $PWD:/backup \
+busybox sh -c "rm /var/lib/rancher/* -rf && \
+tar pzxvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz"
+```
+
+在此命令中,`<RANCHER_CONTAINER_NAME>` 和 `<RANCHER_VERSION>-<DATE>` 是用于 Rancher 部署的环境变量。
+
+请交叉参考下方的图片和表格,了解获取此占位符数据的方法。在开始以下步骤之前,请先记下或复制此信息。
+
+终端 docker ps 命令,显示如何找到 <RANCHER_CONTAINER_TAG> 和 <RANCHER_CONTAINER_NAME>
+
+
+
+| 占位符 | 示例 | 描述 |
+| -------------------------- | -------------------------- | --------------------------------------------------------- |
+| `<RANCHER_CONTAINER_TAG>` | `v2.0.5` | 首次安装拉取的 rancher/rancher 镜像。 |
+| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | 你的 Rancher 容器的名称。 |
+| `<RANCHER_VERSION>` | `v2.0.5` | Rancher 备份的版本号。 |
+| `<DATE>` | `9-27-18` | 数据容器或备份的创建日期。 |
+
+
+可以通过远程连接登录到 Rancher Server 所在的主机并输入命令 `docker ps` 以查看正在运行的容器,从而获得 `<RANCHER_CONTAINER_TAG>` 和 `<RANCHER_CONTAINER_NAME>`。你还可以运行 `docker ps -a` 命令查看停止了的容器。在恢复备份期间,你随时可以运行这些命令来获得帮助。
+
+## 恢复备份
+
+使用你之前创建的[备份](back-up-docker-installed-rancher.md),将 Rancher 恢复到最后已知的健康状态。
+
+1. 使用远程终端连接,登录到运行 Rancher Server 的节点。
+
+1. 停止当前运行 Rancher Server 的容器。将 `<RANCHER_CONTAINER_NAME>` 替换为 Rancher 容器的名称:
+
+ ```
+   docker stop <RANCHER_CONTAINER_NAME>
+ ```
+1. 将你在[创建备份 - Docker 安装](back-up-docker-installed-rancher.md)时创建的备份压缩包移动到 Rancher Server。切换到你将其移动到的目录。输入 `dir` 以确认它在该位置。
+
+   如果你遵循了我们在[创建备份 - Docker 安装](back-up-docker-installed-rancher.md)中推荐的命名方式,它的名称会与 `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz` 类似。
+
+1. 输入以下命令删除当前状态数据,并将其替换为备份数据。请替换占位符。不要忘记关闭引号。
+
+ :::danger
+
+ 该命令将删除 Rancher Server 容器中的所有当前状态数据。创建备份压缩包后保存的任何更改都将丢失。
+
+ :::
+
+ ```
+   docker run --volumes-from <RANCHER_CONTAINER_NAME> -v $PWD:/backup \
+   busybox sh -c "rm /var/lib/rancher/* -rf && \
+   tar pzxvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz"
+ ```
+
+ **步骤结果**:屏幕上将运行命令流。
+
+1. 重新启动 Rancher Server 容器,替换占位符。Rancher Server 将使用你的备份数据重新启动。
+
+ ```
+   docker start <RANCHER_CONTAINER_NAME>
+ ```
+
+1. 等待片刻,然后在浏览器中打开 Rancher。确认还原成功,并且你的数据已恢复。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
new file mode 100644
index 00000000000..97e23f81446
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md
@@ -0,0 +1,129 @@
+---
+title: 使用备份恢复集群
+---
+
+你可以轻松备份和恢复 [Rancher 启动的 Kubernetes 集群](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)的 etcd。etcd 数据库的快照会保存在 etcd 节点或 S3 兼容目标上。配置 S3 的好处是,如果所有 etcd 节点都丢失了,你的快照会保存到远端并能用于恢复集群。
+
+Rancher 建议启用 [etcd 定期快照的功能](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照),但你也可以轻松创建[一次性快照](back-up-rancher-launched-kubernetes-clusters.md#单次快照)。Rancher 允许使用[保存的快照](#使用快照恢复集群)进行恢复。如果你没有任何快照,你仍然可以[恢复 etcd](#在没有快照的情况下恢复-etcdrke)。
+
+集群也可以恢复到之前的 Kubernetes 版本和集群配置。
+
+## 查看可用快照
+
+Rancher UI 中提供了集群所有可用快照的列表:
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面中,转到要查看快照的集群并点击集群名称。
+1. 单击**快照**选项卡。列出的快照包括创建时间的时间戳。
+
+## 使用快照恢复集群
+
+如果你的 Kubernetes 集群已损坏,你可以使用快照来恢复集群。
+
+快照由 etcd 中的集群数据、Kubernetes 版本和 `cluster.yml` 中的集群配置组成。有了这些组件,你可以在使用快照恢复集群时选择:
+
+- **仅恢复 etcd 内容**:类似于在 Rancher v2.4.0 之前版本中的使用快照恢复。
+- **恢复 etcd 和 Kubernetes 版本**:如果 Kubernetes 升级导致集群失败,并且你没有更改任何集群配置,则应使用此选项。
+- **恢复 etcd、Kubernetes 版本和集群配置**:如果你在升级时同时更改了 Kubernetes 版本和集群配置,则应使用此选项。
+
+回滚到之前的 Kubernetes 版本时,[升级策略选项](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md#配置升级策略)会被忽略。在恢复到旧 Kubernetes 版本之前,Worker 节点不会被封锁或清空,因此可以更快地将不健康的集群恢复到健康状态。
+
+:::note 先决条件:
+
+要恢复 S3 中的快照,需要将集群配置为[在 S3 上创建定期快照](back-up-rancher-launched-kubernetes-clusters.md#配置定期快照)。
+
+:::
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面中,转到要查看快照的集群并点击集群名称。
+1. 单击**快照**选项卡来查看已保存快照的列表。
+1. 转到要恢复的快照,然后单击 **⋮ > 还原**。
+1. 选择一个**还原类型**。
+1. 点击**还原**。
+
+**结果**:集群将进入 `updating` 状态,然后将开始使用快照恢复 `etcd` 节点。集群会在返回到 `active` 状态后被恢复。
+
+## Control Plane/etcd 完全不可用时使用快照还原集群
+
+在灾难恢复场景中,下游集群中 Rancher 管理的 Control Plane 和 etcd 节点可能不再可用或运行。你可以通过再次添加 Control Plane 和 etcd 节点来重建集群,然后使用可用快照来进行还原。
+
+
+
+
+请按照 [SUSE 知识库](https://www.suse.com/support/kb/doc/?id=000020695)中描述的流程进行操作。
+
+
+
+
+如果集群完全故障,则必须从集群中删除所有 etcd 节点和机器,然后才能添加新的 etcd 节点来进行还原。
+
+:::note
+
+由于[已知问题](https://github.com/rancher/rancher/issues/41080),此过程需要 Rancher v2.7.5 或更高版本。
+
+:::
+
+:::note
+
+如果你使用[本地快照](./back-up-rancher-launched-kubernetes-clusters.md#本地备份目标),那么请**务必**从要删除的 etcd 节点上的 `/var/lib/rancher/<runtime>/server/db/snapshots/` 文件夹中备份要还原的快照。你可以将快照复制到 `/var/lib/rancher/<runtime>/server/db/snapshots/` 文件夹中的新节点上。此外,如果使用本地快照并还原到新节点,目前还无法通过 UI 进行还原。
+
+:::
+
+1. 从集群中删除所有 etcd 节点。
+
+ 1. 在左上角,单击 **☰ > 集群管理**。
+ 1. 在**集群**页面中,转到要删除节点的集群。
+ 1. 在**主机**选项卡中,找到要删除的每个节点并单击 **⋮ > 删除**。开始时,节点会挂在 `deleting` 状态,所有 etcd 节点都被删除后,它们将被一起删除。这是因为 Rancher 发现所有 etcd 节点都在删除,并开始 "短路" etcd 安全删除逻辑。
+
+1. 删除所有 etcd 节点后,添加要用于还原的新 etcd 节点。
+
+ - 对于自定义集群,请转到**注册**选项卡,然后在节点上复制并运行注册命令。如果该节点之前已在集群中使用过,请先[清理该节点](../manage-clusters/clean-cluster-nodes.md#清理节点)。
+ - 对于主机驱动集群,则会自动配置新节点。
+
+ 此时,Rancher 会提示你需要使用 etcd 快照进行还原。
+
+1. 使用 etcd 快照还原。
+
+ - 对于 S3 快照,使用 UI 进行还原。
+ 1. 单击**快照**选项卡来查看已保存快照的列表。
+ 1. 转到要恢复的快照,然后单击 **⋮ > 还原**。
+ 1. 选择一个**还原类型**。
+ 1. 点击**还原**。
+ - 对于本地快照,使用 UI 进行还原**不**可用。
+     1. 单击右上角的 **⋮ > 编辑 YAML**。
+     1. 将 `spec.cluster.rkeConfig.etcdSnapshotRestore.name` 定义为 `/var/lib/rancher/<runtime>/server/db/snapshots/` 中磁盘上快照的文件名。
+
+1. 还原成功后,你可以将 etcd 节点扩展至所需的冗余。
+
+
+
+
+## 在没有快照的情况下恢复 etcd(RKE)
+
+如果 etcd 节点组失去了仲裁(quorum),由于没有操作(例如部署工作负载)可以在 Kubernetes 集群中执行,Kubernetes 集群将报告失败。集群需要有三个 etcd 节点以防止仲裁丢失。如果你想恢复你的 etcd 节点集,请按照以下说明操作:
+
+1. 通过删除所有其他 etcd 节点,从而仅在集群中保留一个 etcd 节点。
+
+2. 在剩余的单个 etcd 节点上,运行以下命令:
+
+ ```bash
+ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd
+ ```
+
+ 此命令会输出 etcd 要运行的命令,请保存此命令以备后用。
+
+3. 停止正在运行的 `etcd` 容器并将其重命名为 `etcd-old`。
+
+ ```bash
+ docker stop etcd
+ docker rename etcd etcd-old
+ ```
+
+4. 修改步骤 2 中获取保存的命令:
+
+ - 如果你最初有超过 1 个 etcd 节点,则将 `--initial-cluster` 更改为仅包含剩余的单个节点。
+ - 将 `--force-new-cluster` 添加到命令的末尾。
+
+5. 运行修改后的命令。
+
+6. 在单个节点启动并运行后,Rancher 建议向你的集群添加额外的 etcd 节点。如果你有一个[自定义集群](../../../pages-for-subheaders/use-existing-nodes.md),并且想要复用旧节点,则需要先[清理节点](../manage-clusters/clean-cluster-nodes.md),然后再尝试将它们重新添加到集群中。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
new file mode 100644
index 00000000000..eeb6cb521f0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
@@ -0,0 +1,82 @@
+---
+title: 还原 Rancher
+---
+
+本页概述了如何使用 Rancher 执行恢复。
+
+在以下情况下,请按照本页中的说明进行操作:
+- 正在运行的 Rancher 实例与备份时的版本相同。
+- 上游(本地)集群与备份的位置相同。
+
+:::note 重要提示
+
+在使用相同设置还原 Rancher 时,operator 将在还原开始时缩减 Rancher deployment,还原完成后又会扩展 deployment。因此,Rancher 在还原期间将不可用。
+
+:::
+
+:::tip
+
+* 按照以下步骤[迁移 Rancher](migrate-rancher-to-new-cluster.md)。
+* 如果你需要在升级后将 Rancher 还原到先前版本,请参见[回滚](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md)。
+
+:::
+
+### 创建 Restore 自定义资源
+
+还原是通过创建 Restore 自定义资源实现的。
+
+1. 在左上角,单击 **☰ > 集群管理**。
+1. 在**集群**页面上,转到 `local` 集群并单击 **Explore**。Rancher Server 运行在 `local` 集群中。
+1. 在左侧导航栏中,单击 **Rancher 备份 > 还原**。
+1. 单击**创建**。
+1. 使用表单或 YAML 创建 Restore。如需获取使用表单创建 Restore 资源的更多信息,请参见[配置参考](../../../reference-guides/backup-restore-configuration/restore-configuration.md)和[示例](../../../reference-guides/backup-restore-configuration/examples.md)。
+1. 要使用 YAML 编辑器,单击**创建 > 使用 YAML 文件创建**。输入 Restore YAML。
+
+ ```yaml
+ apiVersion: resources.cattle.io/v1
+ kind: Restore
+ metadata:
+ name: restore-migration
+ spec:
+ backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz
+ encryptionConfigSecretName: encryptionconfig
+ storageLocation:
+ s3:
+ credentialSecretName: s3-creds
+ credentialSecretNamespace: default
+ bucketName: rancher-backups
+ folder: rancher
+ region: us-west-2
+ endpoint: s3.us-west-2.amazonaws.com
+ ```
+
+ 如需获得配置 Restore 的帮助,请参见[配置参考](../../../reference-guides/backup-restore-configuration/restore-configuration.md)和[示例](../../../reference-guides/backup-restore-configuration/examples.md)。
+
+1. 单击**创建**。
+
+**结果**:rancher-operator 在还原过程中将 Rancher deployment 缩容,并在还原完成后将它重新扩容。资源还原顺序如下:
+
+1. 自定义资源定义(CRD)
+2. 集群范围资源
+3. 命名空间资源
+
+### 日志
+
+如需查看还原的处理方式,请检查 Operator 的日志。查看日志的命令如下:
+
+```
+kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f
+```
+
+### 清理
+
+如果你使用 kubectl 创建了 Restore 资源,请删除该资源以防止与未来的还原发生命名冲突。
+
+### 已知问题
+在某些情况下,恢复备份后,Rancher 日志会显示类似以下的错误:
+```
+2021/10/05 21:30:45 [ERROR] error syncing 'c-89d82/m-4067aa68dd78': handler rke-worker-upgrader: clusters.management.cattle.io "c-89d82" not found, requeuing
+```
+发生这种情况的原因是,刚刚还原的某个资源有 finalizer,但相关的资源已经被删除,导致处理程序无法找到该资源。
+
+为了消除这些错误,你需要找到并删除导致错误的资源。详情请参见[此处](https://github.com/rancher/rancher/issues/35050#issuecomment-937968556)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md
new file mode 100644
index 00000000000..b6605e51965
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md
@@ -0,0 +1,126 @@
+---
+title: 创建应用
+---
+
+:::tip
+
+有关开发 Chart 的完整演示,请参阅 Helm 官方文档中的 [Chart 模板开发者指南](https://helm.sh/docs/chart_template_guide/)。
+
+:::
+
+## Chart 类型
+
+Rancher 支持两种不同类型的 Chart,分别是 Helm Chart 和 Rancher Chart。
+
+### Helm Chart
+
+原生 Helm Chart 包括一个应用以及运行它所需的软件。部署原生 Helm Chart 时,你可以在 YAML 编辑器中提供 Chart 的参数值。
+
+### Rancher Chart
+
+Rancher Chart 是原生 helm Chart,包含两个可增强用户体验的文件 `app-readme.md` 和 `questions.yaml`。在 [Rancher Chart 的附加文件](#rancher-chart-的附加文件)中了解更多信息。
+
+Rancher Chart 添加了简化的 Chart 描述和配置表单,使应用部署变得容易。Rancher 用户无需通读整个 Helm 变量列表即可了解如何启动应用。
+
+## Chart 目录结构
+
+你可以在基于 HTTP 的标准 Helm 仓库中提供 Helm Chart。有关更多信息,请参阅 Helm 官方文档中的 [Chart 仓库指南](https://helm.sh/docs/topics/chart_repository)。
+
+或者,你可以在 Git 仓库中组织 Chart 并将其直接添加到应用市场。
+
+下表演示了 Git 仓库的目录结构。`charts` 目录是仓库基础下的顶层目录。将仓库添加到 Rancher 将公开其中包含的所有 Chart。`questions.yaml`、`README.md` 和 `requirements.yml` 文件是针对于 Rancher Chart 的,但对于自定义 Chart 是可选的。
+
+```
+/
+ │
+ ├── charts/
+ │ ├── / # 这个目录名称将作为 Chart 名称出现在 Rancher UI 中。
+ │ │ ├── / # 这个级别的每个目录提供了不同的应用版本,可以在 Rancher UI 的 Chart 中选择。
+ │ │ │ ├── Chart.yaml # 必需的 Helm Chart 信息文件。
+ │ │ │ ├── questions.yaml # 在 Rancher UI 中显示的表单问题。问题显示在配置选项中。*
+ │ │ │ ├── README.md # 可选:在 Rancher UI 中显示的 Helm 自述文件。此文本显示在详细说明中。
+ │ │ │ ├── requirements.yml # 可选:列出 Chart 的依赖项的 YAML 文件。
+ │ │ │ ├── values.yml # Chart 的默认配置值。
+ │ │ │ ├── templates/ # 包含模板的目录,与 values.yml 一起能生成 Kubernetes YAML。
+```
+
+## Rancher Chart 的附加文件
+
+在创建你的自定义目录之前,你需要大致了解 Rancher chart 与原生 Helm chart 的区别。Rancher Chart 的目录结构与 Helm Chart 略有不同。Rancher Chart 包含两个 Helm Chart 没有的文件:
+
+- `app-readme.md`
+
+ 在 Chart 的 UI 标头中提供描述性文本的文件。
+
+- `questions.yml`
+
+ 包含表单问题的文件。这些表单问题简化了 Chart 的部署。没有它,你必须使用更复杂的 YAML 配置来配置部署。下图显示了 Rancher Chart(包含 `questions.yml`)和原生 Helm Chart(不包含)之间的区别。
+
+带有 questions.yml 的 Rancher Chart(上)与 Helm Chart(下)
+
+ 
+ 
+
+
+### Chart.yaml 注释
+
+Rancher 支持你添加到 `Chart.yaml` 文件的其他注释。这些注释允许你定义应用依赖项或配置其他 UI 默认值:
+
+| 注解 | 描述 | 示例 |
+| --------------------------------- | ----------- | ------- |
+| catalog.cattle.io/auto-install | 如果设置,将在安装此 Chart 之前先安装指定 Chart 的指定版本。 | other-chart-name=1.0.0 |
+| catalog.cattle.io/display-name | 要在应用市场中显示的名称,而不是 Chart 本身的名称。 | Chart 的显示名称 |
+| catalog.cattle.io/namespace | 用于部署 Chart 的固定命名空间。如果设置,则用户无法更改。 | fixed-namespace |
+| catalog.cattle.io/release-name | Helm 安装的固定版本名称。如果设置,则用户无法更改。 | fixed-release-name |
+| catalog.cattle.io/requests-cpu | 应该在集群中保留的 CPU 总量。如果可用 CPU 资源少于该值,将显示警告。 | 2000m |
+| catalog.cattle.io/requests-memory | 应该在集群中保留的内存总量。如果可用内存资源少于该值,将显示警告。 | 2Gi |
+| catalog.cattle.io/os | 限制可以安装此 Chart 的操作系统。可用值:`linux`、`windows`。默认:无限制 | linux |
+
+### questions.yml
+
+`questions.yml` 中大部分是向最终用户提出的问题,但也有一部分可以在此文件中设置字段。
+
+### 最低/最高 Rancher 版本
+
+你可以为每个 Chart 添加最低和/或最高的 Rancher 版本,这决定了该 Chart 是否可以从 Rancher 部署。
+
+:::note
+
+Rancher 版本带有 `v` 前缀,但是使用此选项时请*不要*包括前缀。
+
+:::
+
+```
+rancher_min_version: 2.3.0
+rancher_max_version: 2.3.99
+```
+
+### Question 变量参考
+
+此参考包含可以嵌套在 `questions:` 下的 `questions.yml` 中的变量:
+
+| 变量 | 类型 | 必填 | 描述 |
+| ------------- | ------------- | --- |------------- |
+| variable | string | true | 定义 `values.yml` 文件中指定的变量名,嵌套对象使用 `foo.bar`。 |
+| label | string | true | 定义 UI 标签。 |
+| description | string | false | 指定变量的描述。 |
+| type | string | false | 如果未指定,则默认为 `string`(支持的类型为 string、multiline、boolean、int、enum、password、storageclass、hostname、pvc 和 secret)。 |
+| required | bool | false | 定义变量是否是必须的(true \| false)。 |
+| default | string | false | 指定默认值。仅在 `values.yml` 文件中没有对应值时使用。 |
+| group | string | false | 按输入值对问题进行分组。 |
+| min_length | int | false | 最小字符长度。 |
+| max_length | int | false | 最大字符长度。 |
+| min | int | false | 最小整数长度。 |
+| max | int | false | 最大整数长度。 |
+| options | []string | false | 为 `enum` 类型的变量指定选项,例如:options: - "ClusterIP" - "NodePort" - "LoadBalancer" |
+| valid_chars | string | false | 输入字符验证的正则表达式。 |
+| invalid_chars | string | false | 无效输入字符验证的正则表达式。 |
+| subquestions | []subquestion | false | 添加一组子问题。 |
+| show_if | string | false | 如果条件变量为 true,则显示当前变量。例如 `show_if: "serviceType=Nodeport"` |
+| show\_subquestion_if | string | false | 如果为 true 或等于某个选项,则显示子问题。例如 `show_subquestion_if: "true"` |
+
+:::note
+
+`subquestions[]` 不能包含 `subquestions` 或 `show_subquestions_if` 键,但支持上表中的所有其他键。
+
+:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md
new file mode 100644
index 00000000000..48ecc783bee
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md
@@ -0,0 +1,216 @@
+---
+title: Helm Charts 和 Apps
+---
+
+
+
+
+
+在本节中,你将学习如何在 Rancher 中管理 Helm Chart 仓库和应用。
+
+## Helm Charts 在 Rancher 中的工作原理
+
+在 Rancher 中 Helm chart 仓库是使用 **Apps** 进行管理的。
+
+Rancher 使用应用商店系统导入一系列的 charts 包到仓库里,然后使用这些 charts 来部署自定义的 Kubernetes 应用程序或 Rancher 的工具,如监控(Monitoring)或 Istio。Rancher 工具预先集成进了仓库,并通过独立的 Helm chart 进行部署。如果你有自己的额外仓库只需要添加到当前的集群中就可以正常部署。
+
+### Catalogs, Apps, Rancher UI
+
+[在 Rancher v2.4 及更早版本中](/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/helm-charts-in-rancher.md), 存储部署的应用程序的仓库被称为 "catalogs"。这些仓库是通过 UI 的 **Catalogs** 进行管理的。
+
+Rancher v2.5 用新的 **Apps & Marketplace** 功能替代了之前的 **应用商店**。
+
+从 Rancher v2.6.5 开始, **Apps & Marketplace** 功能在 UI 中被命名为 **Apps**。
+
+### Versioning Scheme
+
+Rancher chart 的版本设计方案主要围绕 charts 的主要版本以及与上游 charts 相关的 `+up` 注释进行设计。
+
+**主要版本:** charts 的主要版本与 Rancher 的特定次要版本相关联。当你升级到新的 Rancher 次要版本时,你应该确保所有的功能 charts 也被升级对应的主版本。
+
+**基于上游的 Charts:** 在升级时,确保上游 chart 的版本与你的 Rancher 版本兼容。chart 的 `+up` 注释指示 Rancher chart 正在跟踪的上游版本。例如,`100.x.x+up16.6.0` 的监控(Monitoring)跟踪上游的 kube-prometheus-stack `16.6.0` 并附带一些额外的 Rancher 补丁。
+
+在升级 Rancher 版本时,不要降级你正在使用的 chart 版本。例如,如果你在 Rancher v2.5 中使用的监控版本比 `16.6.0` 还要高,你不应该升级到 `100.x.x+up16.6.0`,而是应该在下一个发布中升级到适当的版本。
+
+#### 预发布版本
+
+预发布版本遵循[语义化版本 2.0.0 ](https://semver.org/)的[规范](https://semver.org/#spec-item-9)。 例如,版本为 `0.1.3-dev.12ab4f` 的 Helm chart 被认为是一个预发布版本。预发布版本默认不显示,必须进行配置以显示。
+
+如何显示预发布版本:
+
+1. 点击右上角的用户头像。
+2. 点击 **Preferences**.
+3. 在 **Helm Charts** 下, 选择 **Include Prerelease Versions**.
+
+### 版本控制方案
+
+| **Name** | **支持的最低版本** | **支持的最高版本** |
+| ---------------- | ------------ | ------------ |
+| external-ip-webhook | 100.0.0+up1.0.0 | 100.0.1+up1.0.1 |
+| harvester-cloud-provider | 100.0.2+up0.1.12 | 100.0.2+up0.1.12 |
+| harvester-csi-driver | 100.0.2+up0.1.11 | 100.0.2+up0.1.11 |
+| neuvector | 100.0.0+up2.2.0 | 100.0.0+up2.2.0 |
+| rancher-alerting-drivers | 100.0.0 | 100.0.2 |
+| rancher-backup | 2.0.1 | 2.1.2 |
+| rancher-cis-benchmark | 2.0.1 | 2.0.4 |
+| rancher-gatekeeper | 100.0.0+up3.6.0 | 100.1.0+up3.7.1 |
+| rancher-istio | 100.0.0+up1.10.4 | 100.3.0+up1.13.3 |
+| rancher-logging | 100.0.0+up3.12.0 | 100.1.2+up3.17.4 |
+| rancher-longhorn | 100.0.0+up1.1.2 | 100.1.2+up1.2.4 |
+| rancher-monitoring | 100.0.0+up16.6.0 | 100.1.2+up19.0.3 |
+| rancher-sriov (experimental) | 100.0.0+up0.1.0 | 100.0.3+up0.1.0 |
+| rancher-vsphere-cpi | 100.3.0+up1.2.1 | 100.3.0+up1.2.1 |
+| rancher-vsphere-csi | 100.3.0+up2.5.1-rancher1 | 100.3.0+up2.5.1-rancher1 |
+| rancher-wins-upgrader | 0.0.100 | 100.0.1+up0.0.1 |
+
+## 访问 Charts
+
+**Charts** 页面包含所有 Rancher、Rancher 合作伙伴和自定义 Chart。你可以通过选择左侧的下拉菜单来筛选 Chart:
+
+* Rancher 工具(例如 Logging 或 Monitoring)包含在 **Rancher** 标签下
+* Partner Chart 位于 **Partner** 标签下
+* 自定义 Chart 将显示在仓库的名称下
+
+所有这三种类型都以相同的方式部署和管理。
+
+:::note
+由 Cluster Manager (旧版 Rancher UI 中的全局视图)管理的应用应继续仅由 Cluster Manager 管理,而在新 UI 中使用 Apps 管理的应用则仅能由 Apps 管理。
+:::
+
+访问 **Charts** 页面:
+
+1. 点击 **☰ > Cluster Management**。
+2. 找到你想要访问 Charts 的集群名称。点击集群行末尾的 **Explore**。
+3. 在 **Cluster Dashboard** 的左侧导航菜单中, 点击 **Apps > Charts**。
+
+### 管理仓库
+
+**Repositories** 页面列出了你的 Helm 仓库。 这包括传统的具有 index.yaml 的 Helm 端点,以及克隆并指向特定分支的 Git 仓库。要使用自定义 Charts,在这里可以添加你的仓库。添加仓库后,你可以在 **Charts** 页面中访问自定义 Charts,这些 Charts 将列在仓库的名称下。
+
+访问 **Repositories** 页面:
+
+1. 点击 **☰ > Cluster Management**.
+2. 找到你想要访问 Charts 的集群名称。点击集群行末尾的 **Explore**。
+3. 在 **Cluster Dashboard** 的左侧导航菜单中,点击 **Apps > Repositories**。
+
+### 添加自定义 Git 仓库
+
+要添加一个包含你的 Helm Charts 或集群模板定义的自定义 Git 仓库:
+
+1. 点击 **☰ > Cluster Management**。
+2. 找到你想要访问 Charts 的集群名称。点击集群行末尾的 **Explore**。
+3. 在 **Cluster Dashboard** 的左侧导航菜单中,点击 **Apps > Repositories**。
+4. 点击 **Create**。
+5. 选择 **Git repository containing Helm chart...**。
+6. 你必须输入名称和 Git 仓库的 URL。其他配置项包括描述,都是可选的。如果你不想设置默认的分支,可以输入你想要使用的分支名称。通常,默认分支名为 `main` 或 `master`。
+7. 点击 **Create** 添加。
+
+在 Rancher 中添加 Charts 仓库后,它将立即生效。
+
+### 添加自定义 Helm Chart 仓库
+
+你可以将自己的 Helm chart 仓库添加到 Rancher。为了能正确添加 http 的 Helm Chart 仓库,你需要提供 chart 的服务器并能够响应 GET 请求并提供 YAML 文件和 tar 包。
+
+有关 Helm chart 仓库的更多信息,请参阅 [官方 Helm 文档](https://helm.sh/docs/topics/chart_repository/)。
+
+要将自定义 Helm chart 仓库添加到 Rancher:
+
+1. 点击 **☰ > Cluster Management**。
+2. 找到你想要访问 Charts 的集群名称。点击集群行末尾的 **Explore**。
+3. 在 **Cluster Dashboard** 的左侧导航菜单中,点击 **Apps > Repositories**。
+4. 点击 **Create**。
+5. 选择 **http(s) URL to an index generated by Helm**.
+6. 输入仓库名称和 chart 的 index URL 地址。
+7. 点击 **Create** 添加。
+
+### 添加私有 Git/Helm Chart 仓库
+
+你可以使用 SSH 密钥凭据或 HTTP 基础认证秘密(如用户名和密码)添加私有 Git 或 Helm chart 仓库。
+
+### 向仓库添加私有 CA
+
+向 Helm chart 仓库添加私有 CA,你必须将 DER 格式的 CA 证书的 base64 编码副本添加到 Chart 仓库的 `spec.caBundle` 字段,例如 `openssl x509 -outform der -in ca.pem | base64 -w0`。无论是基于 Git 还是 HTTP 的仓库,操作步骤都是相同的。
+
+1. 点击 **☰**。在左侧导航菜单的 **Explore Cluster**, 选择一个集群。
+2. 在 **Cluster Dashboard** 的左侧导航菜单中,点击 **Apps > Repositories**。
+3. 找到你想要向其添加私有 CA 证书的 Git 或 HTTP 的仓库。点击 **⋮ > Edit YAML**。
+4. 设置 `caBundle` 值,如以下示例:
+
+ ```yaml
+ [...]
+ spec:
+ caBundle:
+ MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT
+ ...
+ nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4=
+ [...]
+ ```
+
+:::note 带有认证的 Helm chart 仓库
+
+Repo.Spec 包含一个 `disableSameOriginCheck` 值,该值允许用户绕过相同源的检查,将仓库身份认证信息作为基本 Auth 标头与所有 API 调用一起发送。不建议采用这种做法,但这可以用作非标准 Helm Chart 仓库(例如重定向到不同源 URL 的仓库)的临时解决方案。
+
+要将此功能用于现有 Helm Chart 仓库,请按照前面的步骤编辑 YAML。在 YAML 文件的 `spec` 部分,添加 `disableSameOriginCheck` 并将其设置为 `true`:
+
+```yaml
+[...]
+spec:
+ disableSameOriginCheck: true
+[...]
+```
+
+:::
+
+### Helm 兼容性
+
+仅支持与 Helm 3 兼容的 Chart。
+
+### 部署和升级 Chart
+
+安装和部署 chart:
+
+1. 点击 **☰ > Cluster Management**。
+2. 找到你想要访问 Charts 的集群名称。点击集群行末尾的 **Explore**。
+3. 在 **Cluster Dashboard** 的左侧导航菜单中,点击 **Apps > Charts**。
+4. 选择一个 chart 点击 **Install**。
+
+Rancher 和 Partner Chart 可能通过自定义页面或 questions.yaml 文件进行额外的配置,但所有 Chart 安装都可以修改 values.yaml 和其他基本设置。单击安装后,将部署一个 Helm 操作作业,并显示该作业的控制台。
+
+要查看所有最近的更改,点击左侧导航栏菜单中的 **Apps > Recent Operations**。你可以查看已进行的调用、条件、事件和日志。
+
+安装 Chart 后,点击左侧导航栏菜单中的 **Apps > Installed Apps**。在本节中,你可以升级或删除安装,并查看更多详细信息。选择升级时,呈现的形式和数值与安装相同。
+
+大多数 Rancher 工具在 **Apps** 下方的工具栏中都有额外的页面,以帮助你管理和使用这些功能。这些页面包括指向仪表板的链接、可轻松添加自定义资源的表单以及其他信息。
+
+:::caution
+
+如果在升级前使用**自定义 Helm 选项**,并且你的 Chart 中包含不可更改的字段,使用 `--force` 选项可能会导致错误。这是因为 Kubernetes 中的某些对象一旦创建就无法更改。要避免该错误,你可以:
+
+* 使用默认升级选项(即不要使用 `--force` 选项)
+* 卸载现有 Chart 并安装升级后的 Chart
+* 在执行强制升级之前删除集群中具有不可更改字段的资源
+
+:::
+
+#### 旧版应用
+
+在 **Apps > Installed Apps** 页面上,旧版应用没有升级按钮。
+
+如果你想升级已安装的旧版应用, 必须启用[旧版功能](../../advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。 如果你在升级 Rancher 之前已经运行了旧版应用,此标志会自动启用。
+
+1. 如果尚未启用,启用[旧版功能](../../advanced-user-guides/enable-experimental-features/enable-experimental-features.md)。
+2. 点击 **☰ > Cluster Management**。
+3. 找到你想要访问 Charts 的集群名称。点击集群行末尾的 **Explore**。
+4. 点击 **Legacy > Project > Apps**.
+
+如果在 **Legacy > Project** 下看不到 **Apps** ,请点击顶部导航栏的 **project/namespace** 搜索栏,并从下拉菜单中选择相关项目。
+
+要升级旧版多集群应用:
+
+1. 点击 **☰**.
+2. 在 **Legacy Apps** 下点击 **Multi-cluster Apps**.
+
+
+### 限制
+
+Rancher CLI 不能用于安装仪表板应用程序或 Rancher 功能 Chart。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md
new file mode 100644
index 00000000000..6486e4fc4ca
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md
@@ -0,0 +1,175 @@
+---
+title: 设置 Amazon NLB 网络负载均衡器
+---
+
+本文介绍了如何在 Amazon EC2 服务中设置 Amazon NLB 网络负载均衡器,用于将流量转发到 EC2 上的多个实例中。
+
+这些示例中,负载均衡器将流量转发到三个 Rancher Server 节点。如果 Rancher 安装在 RKE Kubernetes 集群上,则需要三个节点。如果 Rancher 安装在 K3s Kubernetes 集群上,则只需要两个节点。
+
+本文介绍的只是配置负载均衡器的其中一种方式。其他负载均衡器如传统负载路由器(Classic Load Balancer)和应用负载路由器(Application Load Balancer),也可以将流量转发到 Rancher Server 节点。
+
+Rancher 仅支持使用 Amazon NLB 以 `TCP` 模式终止 443 端口的流量,而不支持 `TLS` 模式。这是因为在 NLB 终止时,NLB 不会将正确的标头注入请求中。如果你想使用由 Amazon Certificate Manager (ACM) 托管的证书,请使用 ALB。
+
+
+
+## 要求
+
+你已在 EC2 中创建了 Linux 实例。此外,负载均衡器会把流量转发到这些节点。
+
+## 1. 创建目标组
+
+首先,为 **TCP** 协议创建两个目标组,其中一个使用 TCP 端口 443,另一个使用 TCP 端口 80(用于重定向到 TCP 端口 443)。然后,将 Linux 节点添加到这些组中。
+
+配置 NLB 的第一个步骤是创建两个目标组。一般来说,只需要端口 443 就可以访问 Rancher。但是,由于端口 80 的流量会被自动重定向到端口 443,因此,你也可以为端口 80 也添加一个监听器。
+
+不管使用的是 NGINX Ingress 还是 Traefik Ingress Controller,Ingress 都应该将端口 80 的流量重定向到端口 443。以下为操作步骤:
+
+1. 登录到 [Amazon AWS 控制台](https://console.aws.amazon.com/ec2/)。确保选择的**区域**是你创建 EC2 实例 (Linux 节点)的区域。
+1. 选择**服务** > **EC2**,找到**负载均衡器**并打开**目标组**。
+1. 单击**创建目标组**,然后创建用于 TCP 端口 443 的第一个目标组。
+
+:::note
+
+不同 Ingress 的健康检查处理方法不同。详情请参阅[本节](#nginx-ingress-和-traefik-ingress-的健康检查路径)。
+
+:::
+
+#### 目标组(TCP 端口 443)
+
+根据下表配置第一个目标组:
+
+| 选项 | 设置 |
+|-------------------|-------------------|
+| 目标组名称 | `rancher-tcp-443` |
+| 目标类型 | `instance` |
+| 协议 | `TCP` |
+| 端口 | `443` |
+| VPC | 选择 VPC |
+
+健康检查设置:
+
+| 选项 | 设置 |
+|---------------------|-----------------|
+| 协议 | TCP |
+| 端口 | `override`,`80` |
+| 健康阈值 | `3` |
+| 不正常阈值 | `3` |
+| 超时 | `6 秒` |
+| 间隔 | `10 秒` |
+
+单击**创建目标组**,然后创建用于 TCP 端口 80 的第二个目标组。
+
+#### 目标组(TCP 端口 80)
+
+根据下表配置第二个目标组:
+
+| 选项 | 设置 |
+|-------------------|------------------|
+| 目标组名称 | `rancher-tcp-80` |
+| 目标类型 | `instance` |
+| 协议 | `TCP` |
+| 端口 | `80` |
+| VPC | 选择 VPC |
+
+
+健康检查设置:
+
+| 选项 | 设置 |
+|---------------------|----------------|
+| 协议 | TCP |
+| 端口 | `traffic port` |
+| 健康阈值 | `3` |
+| 不正常阈值 | `3` |
+| 超时 | `6 秒` |
+| 间隔 | `10 秒` |
+
+## 2. 注册目标
+
+接下来,将 Linux 节点添加到两个目标组中。
+
+选择名为 **rancher-tcp-443** 的目标组,点击**目标**选项卡并选择**编辑**。
+
+
+
+选择你要添加的实例(Linux 节点),然后单击**添加到已注册**。
+
+***
+**将目标添加到目标组 TCP 端口 443**
+
+
+
+***
+**已将目标添加到目标组 TCP 端口 443**
+
+
+
+添加实例后,单击右下方的**保存**。
+
+将 **rancher-tcp-443** 替换为 **rancher-tcp-80**,然后重复上述步骤。你需要将相同的实例作为目标添加到此目标组。
+
+## 3. 创建 NLB
+
+使用 Amazon 的向导创建网络负载均衡器。在这个过程中,你需要添加在 [1. 创建目标组](#1-创建目标组)中创建的目标组。
+
+1. 在网页浏览器中,导航到 [Amazon EC2 控制台](https://console.aws.amazon.com/ec2/)。
+
+2. 在导航栏中,选择**负载均衡** > **负载均衡器**。
+
+3. 单击**创建负载均衡器**。
+
+4. 选择**网络负载均衡器**并单击**创建**。然后,填写每个表格。
+
+- [步骤 1:配置负载均衡器](#步骤-1配置负载均衡器)
+- [步骤 2:配置路由](#步骤-2配置路由)
+- [步骤 3:注册目标](#步骤-3注册目标)
+- [步骤 4:审核](#步骤-4审核)
+
+### 步骤 1:配置负载均衡器
+
+在表单中设置以下字段:
+
+- **名称**:`rancher`
+- **Scheme**:`internal` 或 `internet-facing`。实例和 VPC 的配置决定了 NLB 的 Scheme。如果你的实例没有绑定公共 IP,或者你只需要通过内网访问 Rancher,请将 NLB 的 Scheme 设置为 `internal` 而不是 `internet-facing`。
+- **监听器**:负载均衡器协议需要是 `TCP`,而且负载均衡器端口需要设为 `443`。
+- **可用区:**选择你的**VPC**和**可用区**。
+
+### 步骤 2:配置路由
+
+1. 从**目标组**下拉列表中,选择 **现有目标组**。
+1. 从**名称**下拉列表中,选择 `rancher-tcp-443`。
+1. 打开**高级健康检查设置**,并将**间隔**设为 `10 秒`。
+
+### 步骤 3:注册目标
+
+由于你已经在先前步骤注册了目标,因此你只需单击 **下一步:审核**。
+
+### 步骤 4:审核
+
+检查负载均衡器信息无误后,单击**创建**。
+
+AWS 完成 NLB 创建后,单击**关闭**。
+
+## 4. 为 TCP 端口 80 向 NLB 添加监听器
+
+1. 选择新创建的 NLB 并选择**监听器**选项卡。
+
+2. 单击**添加监听器**。
+
+3. 使用 `TCP`:`80` 作为**协议**:**端口**。
+
+4. 单击**添加操作**并选择**转发到..**。
+
+5. 从**转发到**下拉列表中,选择 `rancher-tcp-80`。
+
+6. 单击右上角的**保存**。
+
+## NGINX Ingress 和 Traefik Ingress 的健康检查路径
+
+K3s 和 RKE Kubernetes 集群使用的默认 Ingress 不同,因此对应的健康检查方式也不同。
+
+RKE Kubernetes 集群默认使用 NGINX Ingress,而 K3s Kubernetes 集群默认使用 Traefik Ingress。
+
+- **Traefik**:默认健康检查路径是 `/ping`。默认情况下,不管主机如何,`/ping` 总是匹配,而且 [Traefik 自身](https://docs.traefik.io/operations/ping/)总会响应。
+- **NGINX Ingress**:NGINX Ingress Controller 的默认后端有一个 `/healthz` 端点。默认情况下,不管主机如何,`/healthz` 总是匹配,而且 [`ingress-nginx` 自身](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212)总会响应。
+
+想要精确模拟健康检查,最好是使用 Host 标头(Rancher hostname)加上 `/ping` 或 `/healthz`(分别对应 K3s 和 RKE 集群)来获取 Rancher Pod 的响应,而不是 Ingress 的响应。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
new file mode 100644
index 00000000000..277961c5dae
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md
@@ -0,0 +1,73 @@
+---
+title: 为高可用 K3s Kubernetes 集群设置基础设施
+---
+
+本教程旨在帮助你为 Rancher Management Server 配置底层基础设施。
+
+我们根据 Rancher 的安装位置(K3s Kubernetes 集群、RKE Kubernetes 集群或单个 Docker 容器)为专用于 Rancher 的 Kubernetes 集群推荐不同基础设施。
+
+有关每个安装选项的详情,请参见[本页](../../../pages-for-subheaders/installation-and-upgrade.md)。
+
+:::note 重要提示:
+
+这些节点必须位于同一个区域。但是你可以把这些服务器放在不同的可用区(数据中心)。
+
+:::
+
+如需在高可用 K3s 集群中安装 Rancher Management Server,我们建议配置以下基础设施:
+
+- **2 个 Linux 节点**:可以是你的云提供商中的虚拟机。
+- **1 个外部数据库**:用于存储集群数据。建议使用 MySQL。
+- **1 个负载均衡器**:用于将流量转发到这两个节点中。
+- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
+
+### 1. 配置 Linux 节点
+
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置外部数据库
+
+K3s 与其他 Kubernetes 发行版不同,在于其支持使用 etcd 以外的数据库来运行 Kubernetes。该功能让 Kubernetes 运维更加灵活。你可以根据实际情况选择合适的数据库。
+
+对于 K3s 高可用安装,你需要配置一个 [MySQL](https://www.mysql.com/) 外部数据库。Rancher 已在使用 MySQL 5.7 作为数据存储的 K3s Kubernetes 集群上进行了测试。
+
+在使用 K3s 安装脚本安装 Kubernetes 时,你需要传入 K3s 连接数据库的详细信息。
+
+如需获取配置 MySQL 数据库示例,请参见[在 Amazon RDS 服务中配置 MySQL](mysql-database-in-amazon-rds.md) 的教程。
+
+如需获取配置 K3s 集群数据库的所有可用选项,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
+
+### 3. 配置负载均衡器
+
+你还需要设置一个负载均衡器,来将流量重定向到两个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
+
+在后续步骤中配置 Kubernetes 时,K3s 工具会部署一个 Traefik Ingress Controller。该 Controller 将侦听 worker 节点的 80 端口和 443 端口,以响应发送给特定主机名的流量。
+
+在安装 Rancher 后(也是在后续步骤中),Rancher 系统将创建一个 Ingress 资源。该 Ingress 通知 Traefik Ingress Controller 监听发往 Rancher 主机名的流量。Traefik Ingress Controller 在收到发往 Rancher 主机名的流量时,会将其转发到集群中正在运行的 Rancher Server Pod。
+
+在你的实现中,你可以考虑是否需要使用 4 层或 7 层的负载均衡器:
+
+- **4 层负载均衡器**:两种选择中较为简单的一种,它将 TCP 流量转发到你的节点中。我们建议使用 4 层负载均衡器,将流量从 TCP/80 端口和 TCP/443 端口转发到 Rancher Management 集群节点上。集群上的 Ingress Controller 会将 HTTP 流量重定向到 HTTPS,并在 TCP/443 端口上终止 SSL/TLS。Ingress Controller 会将流量转发到 Rancher deployment 中 Ingress Pod 的 TCP/80 端口。
+- **7 层负载均衡器**:相对比较复杂,但功能更全面。例如,与 Rancher 本身进行 TLS 终止相反,7 层负载均衡器能够在负载均衡器处处理 TLS 终止。如果你需要集中在基础设施中进行 TLS 终止,7 层负载均衡可能会很适合你。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。如果你选择在 7 层负载均衡器上终止 SSL/TLS 流量,则在安装 Rancher 时(后续步骤)需要使用 `--set tls=external` 选项。详情请参见 [Rancher Helm Chart 选项](../../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)。
+
+如需获取配置 NGINX 负载均衡器的示例,请参见[本页](nginx-load-balancer.md)。
+
+如需获取如何配置 Amazon ELB 网络负载均衡器的指南,请参见[本页](amazon-elb-load-balancer.md)。
+
+:::caution
+
+安装后,请勿将此负载均衡(例如 `local` 集群 Ingress)用于 Rancher 以外的应用。如果此 Ingress 与其他应用共享,在其他应用的 Ingress 配置重新加载后,可能导致 Rancher 出现 websocket 错误。我们建议把 `local` 集群专用给 Rancher,不要在集群内部署其他应用。
+
+:::
+
+### 4. 配置 DNS 记录
+
+配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
+
+根据你的环境,DNS 记录可以是指向负载均衡器 IP 的 A 记录,也可以是指向负载均衡器主机名的 CNAME。无论是哪种情况,请确保该记录是你要 Rancher 进行响应的主机名。
+
+在安装 Rancher 时(后续步骤),你需要指定此主机名。请知悉,此主机名无法修改。请确保你设置的主机名是你想要的。
+
+有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
new file mode 100644
index 00000000000..24387c1f786
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md
@@ -0,0 +1,64 @@
+---
+title: 为高可用 RKE Kubernetes 集群设置基础设施
+---
+
+本教程旨在帮助你创建一个高可用的 RKE 集群,用于安装 Rancher Server。
+
+:::note 重要提示:
+
+这些节点必须位于同一个区域。但是你可以把这些服务器放在不同的可用区(数据中心)。
+
+:::
+
+如需在高可用 RKE 集群中安装 Rancher Management Server,我们建议配置以下基础设施:
+
+- **3 个 Linux 节点**:可以是你的云提供商(例如 Amazon EC2,GCE 或 vSphere)中的虚拟机。
+- **1 个负载均衡器**:用于将前端流量转发到这三个节点中。
+- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
+
+这些节点必须位于同一个区域或数据中心。但是你可以把这些服务器放在不同的可用区。
+
+### 为什么使用三个节点?
+
+在 RKE 集群中,Rancher Server 的数据存储在 etcd 中。而这个 etcd 数据库在这三个节点上运行。
+
+为了选举出大多数 etcd 节点认可的 etcd 集群 leader,etcd 数据库需要奇数个节点。如果 etcd 数据库无法选出 leader,etcd 可能会出现[脑裂(split brain)](https://www.quora.com/What-is-split-brain-in-distributed-systems)的问题,此时你需要使用备份恢复集群。如果三个 etcd 节点之一发生故障,其余两个节点可以选择一个 leader,因为它们是 etcd 节点总数的大多数部分。
+
+### 1. 配置 Linux 节点
+
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置负载均衡器
+
+你还需要设置一个负载均衡器,来将流量重定向到三个节点中的任意一个节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
+
+在后续步骤中配置 Kubernetes 时,RKE 工具会部署一个 NGINX Ingress Controller。该 Controller 将侦听 worker 节点的 80 端口和 443 端口,以响应发送给特定主机名的流量。
+
+在安装 Rancher 后(也是在后续步骤中),Rancher 系统将创建一个 Ingress 资源。该 Ingress 通知 NGINX Ingress Controller 监听发往 Rancher 主机名的流量。NGINX Ingress Controller 在收到发往 Rancher 主机名的流量时,会将其转发到集群中正在运行的 Rancher Server Pod。
+
+在你的实现中,你可以考虑是否需要使用 4 层或 7 层的负载均衡器:
+
+- **4 层负载均衡器**:两种选择中较为简单的一种,它将 TCP 流量转发到你的节点中。我们建议使用 4 层负载均衡器,将流量从 TCP/80 端口和 TCP/443 端口转发到 Rancher Management 集群节点上。集群上的 Ingress Controller 会将 HTTP 流量重定向到 HTTPS,并在 TCP/443 端口上终止 SSL/TLS。Ingress Controller 会将流量转发到 Rancher deployment 中 Ingress Pod 的 TCP/80 端口。
+- **7 层负载均衡器**:相对比较复杂,但功能更全面。例如,与 Rancher 本身进行 TLS 终止相反,7 层负载均衡器能够在负载均衡器处处理 TLS 终止。如果你需要集中在基础设施中进行 TLS 终止,7 层负载均衡可能会很适合你。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。如果你选择在 7 层负载均衡器上终止 SSL/TLS 流量,则在安装 Rancher 时(后续步骤)需要使用 `--set tls=external` 选项。详情请参见 [Rancher Helm Chart 选项](../../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)。
+
+如需获取配置 NGINX 负载均衡器的示例,请参见[本页](nginx-load-balancer.md)。
+
+如需获取如何配置 Amazon ELB 网络负载均衡器的指南,请参见[本页](amazon-elb-load-balancer.md)。
+
+:::caution
+
+安装后,请勿将此负载均衡(例如 `local` 集群 Ingress)用于 Rancher 以外的应用。如果此 Ingress 与其他应用共享,在其他应用的 Ingress 配置重新加载后,可能导致 Rancher 出现 websocket 错误。我们建议把 `local` 集群专用给 Rancher,不要在集群内部署其他应用。
+
+:::
+
+### 3. 配置 DNS 记录
+
+配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
+
+根据你的环境,DNS 记录可以是指向负载均衡器 IP 的 A 记录,也可以是指向负载均衡器主机名的 CNAME。无论是哪种情况,请确保该记录是你要 Rancher 进行响应的主机名。
+
+在安装 Rancher 时(后续步骤),你需要指定此主机名。请知悉,此主机名无法修改。请确保你设置的主机名是你想要的。
+
+有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
new file mode 100644
index 00000000000..0af8f17b3bb
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md
@@ -0,0 +1,58 @@
+---
+title: 为高可用 RKE2 Kubernetes 集群设置基础设施
+---
+
+本教程旨在帮助你为 Rancher Management Server 配置底层基础设施。
+
+我们根据 Rancher 的安装位置(RKE2 Kubernetes 集群、RKE Kubernetes 集群或单个 Docker 容器)为专用于 Rancher 的 Kubernetes 集群推荐不同基础设施。
+
+:::note 重要提示:
+
+这些节点必须位于同一个区域。但是你可以把这些服务器放在不同的可用区(数据中心)。
+
+:::
+
+如需在高可用 RKE2 集群中安装 Rancher Management Server,我们建议配置以下基础设施:
+
+- **3 个 Linux 节点**:可以是你的云提供商中的虚拟机。
+- **1 个负载均衡器**:用于将流量转发到这三个节点中。
+- **1 个 DNS 记录**:用于将 URL 映射到负载均衡器。此 DNS 记录将成为 Rancher Server 的 URL,下游集群需要可以访问到这个地址。
+
+### 1. 配置 Linux 节点
+
+请确保你的节点满足[操作系统,容器运行时,硬件和网络](../../../pages-for-subheaders/installation-requirements.md)的常规要求。
+
+如需获取配置 Linux 节点的示例,请参见[在 Amazon EC2 中配置节点](nodes-in-amazon-ec2.md)的教程。
+
+### 2. 配置负载均衡器
+
+你还需要设置一个负载均衡器,来将流量重定向到所有节点上的 Rancher 副本。配置后,当单个节点不可用时,继续保障与 Rancher Management Server 的通信。
+
+在后续步骤中配置 Kubernetes 时,RKE2 工具会部署一个 NGINX Ingress Controller。该 Controller 将侦听 worker 节点的 80 端口和 443 端口,以响应发送给特定主机名的流量。
+
+在安装 Rancher 后(也是在后续步骤中),Rancher 系统将创建一个 Ingress 资源。该 Ingress 通知 NGINX Ingress Controller 监听发往 Rancher 主机名的流量。NGINX Ingress Controller 在收到发往 Rancher 主机名的流量时,会将其转发到集群中正在运行的 Rancher Server Pod。
+
+在你的实现中,你可以考虑是否需要使用 4 层或 7 层的负载均衡器:
+
+- **4 层负载均衡器**:两种选择中较为简单的一种,它将 TCP 流量转发到你的节点中。我们建议使用 4 层负载均衡器,将流量从 TCP/80 端口和 TCP/443 端口转发到 Rancher Management 集群节点上。集群上的 Ingress Controller 会将 HTTP 流量重定向到 HTTPS,并在 TCP/443 端口上终止 SSL/TLS。Ingress Controller 会将流量转发到 Rancher deployment 中 Ingress Pod 的 TCP/80 端口。
+- **7 层负载均衡器**:相对比较复杂,但功能更全面。例如,与 Rancher 本身进行 TLS 终止相反,7 层负载均衡器能够在负载均衡器处处理 TLS 终止。如果你需要集中在基础设施中进行 TLS 终止,7 层负载均衡可能会很适合你。7 层负载均衡还能让你的负载均衡器基于 HTTP 属性(例如 cookie 等)做出决策,而 4 层负载均衡器则不能。如果你选择在 7 层负载均衡器上终止 SSL/TLS 流量,则在安装 Rancher 时(后续步骤)需要使用 `--set tls=external` 选项。详情请参见 [Rancher Helm Chart 选项](../../../getting-started/installation-and-upgrade/installation-references/helm-chart-options.md#外部-tls-终止)。
+
+如需获取配置 NGINX 负载均衡器的示例,请参见[本页](nginx-load-balancer.md)。
+
+如需获取如何配置 Amazon ELB 网络负载均衡器的指南,请参见[本页](amazon-elb-load-balancer.md)。
+
+:::caution
+
+安装后,请勿将此负载均衡(例如 `local` 集群 Ingress)用于 Rancher 以外的应用。如果此 Ingress 与其他应用共享,在其他应用的 Ingress 配置重新加载后,可能导致 Rancher 出现 websocket 错误。我们建议把 `local` 集群专用给 Rancher,不要在集群内部署其他应用。
+
+:::
+
+### 3. 配置 DNS 记录
+
+配置完负载均衡器后,你将需要创建 DNS 记录,以将流量发送到该负载均衡器。
+
+根据你的环境,DNS 记录可以是指向负载均衡器 IP 的 A 记录,也可以是指向负载均衡器主机名的 CNAME。无论是哪种情况,请确保该记录是你要 Rancher 进行响应的主机名。
+
+在安装 Rancher 时(后续步骤),你需要指定此主机名。请知悉,此主机名无法修改。请确保你设置的主机名是你想要的。
+
+有关设置 DNS 记录以将域流量转发到 Amazon ELB 负载均衡器的指南,请参见 [AWS 官方文档](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)。
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/infrastructure-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/infrastructure-setup.md
new file mode 100644
index 00000000000..aa3c433b873
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/infrastructure-setup.md
@@ -0,0 +1,11 @@
+---
+title: Kubernetes 集群基础设施
+---
+
+
+
+
+
+要为具有外部数据库的高可用 K3s Kubernetes 集群设置基础设施,请参见[本页面](ha-k3s-kubernetes-cluster.md)。
+
+要为高可用 RKE Kubernetes 集群设置基础设施,请参见[本页面](ha-rke1-kubernetes-cluster.md)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md
new file mode 100644
index 00000000000..d312a5f6207
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md
@@ -0,0 +1,34 @@
+---
+title: 在 Amazon RDS 中创建 MySQL 数据库
+---
+
+本教程介绍如何在 Amazon Relational Database Service (RDS) 中创建 MySQL 数据库。
+
+该数据库可用作高可用 K3s Kubernetes 集群的外部数据存储。
+
+1. 登录到 [Amazon AWS RDS 控制台](https://console.aws.amazon.com/rds/)。确保选择的**区域**是你创建 EC2 实例 (Linux 节点)的区域。
+1. 在左侧面板中,点击**数据库**。
+1. 点击**创建数据库**。
+1. 在**引擎类型**中,点击 **MySQL**。
+1. 在**版本**中,选择 **MySQL 5.7.22**。
+1. 在**设置**部分的**凭证设置**下,输入 **admin** 主用户名的密码。确认密码。
+1. 展开**其它配置**。在**初始数据库名称**字段,设置数据库名称。该名称仅可包含字母,数字和下划线。这个名称会用于连接到数据库。
+1. 点击**创建数据库**。
+
+你需要获取新数据库的以下信息,以便 K3s Kubernetes 集群可以连接到该数据库。
+
+如需在 Amazon RDS 控制台查看此信息,点击**数据库**,然后点击你创建的数据库的名称。
+
+- **用户名**:使用 admin 用户名。
+- **密码**:使用 admin 密码。
+- **主机名**:使用**端点**作为主机名。端点可以在**连接性和安全性**部分找到。
+- **端口**:默认为 3306。你可以在**连接性和安全性**处确认端口。
+- **数据库名称**:前往**配置**选项卡确认数据库名称。名称会在**数据库名称**中列出。
+
+按以下格式使用上述信息连接到数据库:
+
+```
+mysql://username:password@tcp(hostname:3306)/database-name
+```
+
+有关为 K3s 配置数据库的详情,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/installation/datastore/)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md
new file mode 100644
index 00000000000..b686c28221a
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md
@@ -0,0 +1,102 @@
+---
+title: 配置 NGINX 负载均衡器
+---
+
+将 NGINX 配置为四层负载均衡器(TCP),用于将连接转发到 Rancher 节点。
+
+在此配置中,负载均衡器位于节点的前面。负载均衡器可以是任何能运行 NGINX 的主机。
+
+:::note
+
+不要使用 Rancher 节点作为负载均衡器。
+
+:::
+
+> 这些示例中,负载均衡器将流量转发到三个 Rancher Server 节点。如果 Rancher 安装在 RKE Kubernetes 集群上,则需要三个节点。如果 Rancher 安装在 K3s Kubernetes 集群上,则只需要两个节点。
+
+## 安装 NGINX
+
+首先,在要用作负载均衡器的节点上安装 NGINX。NGINX 有适用于所有已知操作系统的软件包。已测试的版本为 `1.14` 和 `1.15`。如需获得安装 NGINX 的帮助,请参见[安装文档](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/)。
+
+`stream` 模块是必需的,NGINX 官方安装包包含了该模块。请参见你操作系统的文档,了解如何在操作系统上安装和启用 NGINX 的 `stream` 模块。
+
+## 创建 NGINX 配置
+
+安装 NGINX 后,使用节点的 IP 地址更新 NGINX 配置文件 `nginx.conf`。
+
+1. 将以下的示例代码复制并粘贴到你使用的文本编辑器中。将文件保存为 `nginx.conf`。
+
+2. 在 `nginx.conf` 中,将所有(端口 80 和端口 443)的 `<IP_NODE_1>`、`<IP_NODE_2>` 和 `<IP_NODE_3>` 替换为你节点的 IP 地址。
+
+ :::note
+
+ 参见 [NGINX 文档:TCP 和 UDP 负载均衡](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/)了解所有配置选项。
+
+ :::
+
+示例 NGINX 配置
+
+ ```
+ worker_processes 4;
+ worker_rlimit_nofile 40000;
+
+ events {
+ worker_connections 8192;
+ }
+
+ stream {
+ upstream rancher_servers_http {
+ least_conn;
+ server :80 max_fails=3 fail_timeout=5s;
+ server :80 max_fails=3 fail_timeout=5s;
+ server :80 max_fails=3 fail_timeout=5s;
+ }
+ server {
+ listen 80;
+ proxy_pass rancher_servers_http;
+ }
+
+ }
+
+ http {
+
+ upstream rancher_servers_https {
+ least_conn;
+ server :443 max_fails=3 fail_timeout=5s;
+ server :443 max_fails=3 fail_timeout=5s;
+ server :443 max_fails=3 fail_timeout=5s;
+ }
+ server {
+ listen 443 ssl;
+ proxy_pass rancher_servers_https;
+ ssl_certificate /path/to/tls.crt;
+ ssl_certificate_key /path/to/key.key;
+ location / {
+ proxy_pass https://rancher_servers_https;
+ proxy_set_header Host ;
+ proxy_ssl_server_name on;
+ proxy_ssl_name
+ }
+ }
+ }
+ ```
+
+
+3. 将 `nginx.conf` 保存到你的负载均衡器的 `/etc/nginx/nginx.conf` 路径上。
+
+4. 运行以下命令重新加载 NGINX 配置:
+
+ ```
+ # nginx -s reload
+ ```
+
+## 可选 - 将 NGINX 作为 Docker 容器运行
+
+除了将 NGINX 作为软件包安装在操作系统上外,你也可以将其作为 Docker 容器运行。将编辑后的 **NGINX 配置示例** 保存为`/etc/nginx.conf`,并运行以下命令来启动 NGINX 容器:
+
+```
+docker run -d --restart=unless-stopped \
+ -p 80:80 -p 443:443 \
+ -v /etc/nginx.conf:/etc/nginx/nginx.conf \
+ nginx:1.14
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
new file mode 100644
index 00000000000..21294b1016e
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md
@@ -0,0 +1,74 @@
+---
+title: 在 Amazon EC2 中配置节点
+---
+
+在本教程中,你将学习一种为 Rancher Management Server 创建 Linux 节点的方法。这些节点将满足[操作系统、Docker、硬件和网络的要求](../../../pages-for-subheaders/installation-requirements.md)。
+
+如果 Rancher Server 安装在 RKE Kubernetes 集群上,你需要配置三个实例。
+
+如果 Rancher Server 安装在 K3s Kubernetes 集群上,你只需要配置两个实例。
+
+如果 Rancher Server 安装在单个 Docker 容器中,你只需要配置一个实例。
+
+### 1. 准备工作(可选)
+
+- **创建 IAM 角色**:要允许 Rancher 操作 AWS 资源,例如创建新存储或新节点,你需要将 Amazon 配置为云提供商。要在 EC2 上设置云提供商,你需要进行几个操作,其中包括为 Rancher Server 节点设置 IAM 角色。有关设置云提供商的详情,请参见[本页](../../../pages-for-subheaders/set-up-cloud-providers.md)。
+- **创建安全组**:我们建议为 Rancher 节点设置一个符合 [Rancher 节点端口要求](../../../pages-for-subheaders/installation-requirements.md#端口要求)的安全组。
+
+### 2. 配置实例
+
+1. 登录到 [Amazon AWS EC2 控制台](https://console.aws.amazon.com/ec2/)。由于 Rancher Management Server 的所有基础设施都需要位于同一区域,因此,请务必记下创建 EC2 实例(Linux 节点)的**区域**。
+1. 在左侧面板中,点击**实例**。
+1. 点击**启动实例**。
+1. 在**步骤 1:选择 Amazon Machine Image (AMI)** 中,使用 `ami-0d1cd67c26f5fca19 (64-bit x86)` 来使用 Ubuntu 18.04 作为 Linux 操作系统。去到 Ubuntu AMI 并点击**选择**。
+1. 在**步骤 2:选择实例类型**中,选择 `t2.medium`。
+1. 点击**下一步:配置实例详细信息**。
+1. 在**实例数量**字段中,输入实例数量。创建高可用 K3s 集群仅需要两个实例,而高可用 RKE 集群则需要三个实例。
+1. 可选:如果你为 Rancher 创建了一个 IAM 角色来操作 AWS 资源,请在 **IAM 角色**字段中选择新 IAM 角色。
+1. 分别点击**下一步:添加存储**,**下一步:添加标签**和**下一步:配置安全组**。
+1. 在**步骤 6:配置安全组**中,选择一个符合 Rancher 节点[端口要求](../../../pages-for-subheaders/installation-requirements.md#端口要求)的安全组。
+1. 点击**查看并启动**。
+1. 点击**启动**。
+1. 选择一个新的或现有的密钥对,用于之后连接到你的实例。如果使用现有密钥对,请确保你有访问私钥的权限。
+1. 点击**启动实例**。
+
+
+**结果**:你已创建满足操作系统、硬件和网络要求的 Rancher 节点。
+
+:::note
+
+如果节点用于 RKE Kubernetes 集群,请在下一步操作中为每个节点安装 Docker 。如果节点用于 K3s Kubernetes 集群,你可以开始在节点上安装 K3s 了。
+
+:::
+
+### 3. 为 RKE Kubernetes 集群节点安装 Docker 并创建用户
+
+1. 在 [AWS EC2 控制台](https://console.aws.amazon.com/ec2/)中,点击左侧面板中的**实例**。
+1. 转到你想要安装 Docker 的实例。选择实例,并点击**操作 > 连接**。
+1. 按照屏幕上的说明连接到实例。复制实例的公共 DNS。SSH 进入实例的示例命令如下:
+```
+sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance]
+```
+1. 在实例上运行以下命令,使用 Rancher 的其中一个安装脚本来安装 Docker:
+```
+curl https://releases.rancher.com/install-docker/18.09.sh | sh
+```
+1. 连接到实例后,在实例上运行以下命令来创建用户:
+```
+sudo usermod -aG docker ubuntu
+```
+1. 在每个节点上重复上述步骤,以确保 Docker 安装到每个用于运行 Rancher Management Server 的节点上。
+
+:::tip
+
+要了解我们是否提供指定的 Docker 版本的安装脚本,请访问此 [GitHub 仓库](https://github.com/rancher/install-docker),该仓库包含 Rancher 的所有 Docker 安装脚本。
+
+:::
+
+**结果**:你已配置满足操作系统、Docker、硬件和网络要求的 Rancher Server 节点。
+
+### RKE Kubernetes 集群节点的后续步骤
+
+如需在新节点上安装 RKE 集群,请记住每个节点的 **IPv4 公共 IP** 和 **私有 IP**。创建节点后,此信息可以在每个节点的**描述**选项卡中找到。公共和私有 IP 将用于设置 RKE 集群配置文件 `rancher-cluster.yml` 中每个节点的 `address` 和 `internal_address`。
+
+RKE 还需要访问私钥才能连接到每个节点。因此,请记住连接到节点的私钥的路径,该路径也可用于设置 `rancher-cluster.yml` 中每个节点的 `ssh_key_path`。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
new file mode 100644
index 00000000000..2f28274e31b
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md
@@ -0,0 +1,25 @@
+---
+title: 高可用安装
+---
+
+我们建议使用 Helm( Kubernetes 包管理器 )在专用的 Kubernetes 集群上安装 Rancher。由于 Rancher 运行在多个节点上提升了可用性,因此这种安装方式叫做高可用 Kubernetes 安装。
+
+在标准安装中,先将 Kubernetes 安装在托管在云提供商(例如 Amazon 的 EC2 或 Google Compute Engine)中的三个节点上。
+
+然后使用 Helm 在 Kubernetes 集群上安装 Rancher。Helm 使用 Rancher 的 Helm Chart 在 Kubernetes 集群的三个节点中均安装 Rancher 的副本。我们建议使用负载均衡器将流量转发到集群中的每个 Rancher 副本中,以提高 Rancher 的可用性。
+
+Rancher Server 的数据存储在 etcd 中。etcd 数据库可以在所有三个节点上运行。为了选举出大多数 etcd 节点认同的 etcd 集群 leader,节点的数量需要是奇数。如果 etcd 数据库不能选出 leader,etcd 可能会失败。这时候就需要使用备份来还原集群。
+
+有关 Rancher 如何工作的详情(与安装方法无关),请参见[架构](../../../pages-for-subheaders/rancher-manager-architecture.md)。
+
+### 推荐架构
+
+- Rancher 的 DNS 应该解析为 4 层负载均衡器。
+- 负载均衡器应该把 TCP/80 端口和 TCP/443 端口的流量转发到 Kubernetes 集群的全部 3 个节点上。
+- Ingress Controller 会把 HTTP 重定向到 HTTPS,在 TCP/443 端口终结 SSL/TLS。
+- Ingress Controller 会把流量转发到 Rancher deployment 的 Pod 上的 TCP/80 端口。
+
+使用 4 层负载均衡器在 Kubernetes 集群中安装 Rancher:Ingress Controller 的 SSL 终止:
+
+
+使用 4 层负载均衡器在 Kubernetes 集群中安装 Rancher:Ingress Controller 的 SSL 终止
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md
new file mode 100644
index 00000000000..4d8ca8ac24c
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md
@@ -0,0 +1,136 @@
+---
+title: 为 Rancher 设置高可用 K3s Kubernetes 集群
+---
+
+本文介绍了如何根据 [Rancher Server 环境的最佳实践](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#kubernetes-安装环境)安装 Kubernetes 集群。
+
+如果你的系统无法直接访问互联网,请参见离线安装说明。
+
+:::tip 单节点安装提示:
+
+在单节点 Kubernetes 集群中,Rancher Server 不具备高可用性,而高可用性对在生产环境中运行 Rancher 非常重要。但是,如果你想要短期内使用单节点节省资源,同时又保留高可用性迁移路径,把 Rancher 安装到单节点集群也是合适的。
+
+要配置单节点 K3s 集群,你只需要在单个节点上运行 Rancher Server 安装命令(不需要在两个节点上运行命令)。
+
+在这两种单节点设置中,Rancher 可以与 Helm 一起安装在 Kubernetes 集群上,安装方法与安装到其他集群上一样。
+
+:::
+
+## 先决条件
+
+以下说明假设你已参见[此章节](../infrastructure-setup/ha-k3s-kubernetes-cluster.md)配置好两个节点,一个负载均衡器,一个 DNS 记录和一个外部 MySQL 数据库。
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见 [Rancher 支持矩阵](https://rancher.com/support-maintenance-terms/)。
+
+如需指定 K3s(Kubernetes)版本,在运行 K3s 安装脚本时使用 `INSTALL_K3S_VERSION` 环境变量(例如 `INSTALL_K3S_VERSION="v1.24.10+k3s1"`)。
+
+## 安装 Kubernetes
+
+### 1. 安装 Kubernetes 并设置 K3s Server
+
+在运行启动 K3s Kubernetes API Server 的命令时,你会传入使用之前设置的外部数据存储的选项。
+
+1. 连接到你准备用于运行 Rancher Server 的其中一个 Linux 节点。
+1. 在 Linux 节点上,运行以下命令来启动 K3s Server,并将其连接到外部数据存储。
+ ```
+    curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=<VERSION> sh -s - server \
+    --datastore-endpoint="<DATASTORE-ENDPOINT>"
+ ```
+
+    其中 `<DATASTORE-ENDPOINT>` 是数据存储的连接 URI。例如,如果你使用的是 MySQL,则为 `mysql://username:password@tcp(hostname:3306)/database-name`。有效的数据存储包括 etcd、MySQL、PostgreSQL 或 SQLite(默认)。
+
+ :::note
+
+ 你也可以使用 `$K3S_DATASTORE_ENDPOINT` 环境变量来传递数据存储端点。
+
+ :::
+
+1. 获取主 Server 节点令牌:
+ ```
+ cat /var/lib/rancher/k3s/server/token
+ ```
+
+1. 在第二个 K3s Server 节点上运行命令:
+ ```
+    curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=<VERSION> sh -s - server \
+    --datastore-endpoint="<DATASTORE-ENDPOINT>" \
+    --token "<TOKEN>"
+ ```
+
+### 2. 检查 K3s 是否正常运行
+
+在其中一个 K3s Server 节点上运行以下命令,来确认 K3s 是否已经设置成功:
+```
+sudo k3s kubectl get nodes
+```
+
+然后你会看到两个具有 master 角色的节点。
+```
+ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes
+NAME STATUS ROLES AGE VERSION
+ip-172-31-60-194 Ready master 44m v1.17.2+k3s1
+ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1
+```
+
+测试集群 Pod 的健康状况:
+```
+sudo k3s kubectl get pods --all-namespaces
+```
+
+**结果**:你已成功配置 K3s Kubernetes 集群。
+
+### 3. 保存并开始使用 kubeconfig 文件
+
+在每个 Rancher Server 节点安装 K3s 时,会在每个节点的 `/etc/rancher/k3s/k3s.yaml` 中生成一个 `kubeconfig` 文件。该文件包含访问集群的凭证。请将该文件保存在安全的位置。
+
+如要使用该 `kubeconfig` 文件:
+
+1. 安装 Kubernetes 命令行工具 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl)。
+2. 复制 `/etc/rancher/k3s/k3s.yaml` 文件并保存到本地主机的 `~/.kube/config` 目录上。
+3. 在 kubeconfig 文件中,`server` 的参数为 localhost。你需要将 `server` 配置为负载均衡器的 DNS,并指定端口 6443(通过端口 6443 访问 Kubernetes API Server,通过端口 80 和 443 访问 Rancher Server)。以下是一个 `k3s.yaml` 示例:
+
+```yml
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: [CERTIFICATE-DATA]
+ server: [LOAD-BALANCER-DNS]:6443 # 编辑此行
+ name: default
+contexts:
+- context:
+ cluster: default
+ user: default
+ name: default
+current-context: default
+kind: Config
+preferences: {}
+users:
+- name: default
+ user:
+ password: [PASSWORD]
+ username: admin
+```
+
+**结果**:你可以开始使用 `kubectl` 来管理你的 K3s 集群。如果你有多个 `kubeconfig` 文件,在使用 `kubectl` 时,你可以传入文件路径来指定要使用的 `kubeconfig` 文件:
+
+```
+kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces
+```
+
+有关 `kubeconfig` 文件的详情,请参见 [K3s 官方文档](https://rancher.com/docs/k3s/latest/en/cluster-access/) 或 [Kubernetes 官方文档](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)中关于使用 `kubeconfig` 文件管理集群访问的部分。
+
+### 4. 检查集群 Pod 的健康状况
+
+现在你已经设置了 `kubeconfig` 文件。你可以使用 `kubectl` 在本地主机访问集群。
+
+检查所有需要的 Pod 和容器是否健康:
+
+```
+ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces
+NAMESPACE NAME READY STATUS RESTARTS AGE
+kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d
+kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d
+kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d
+```
+
+**结果**:你可通过使用 `kubectl` 访问集群,并且 K3s 集群能成功运行。现在,你可以在集群上安装 Rancher Management Server。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/kubernetes-cluster-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/kubernetes-cluster-setup.md
new file mode 100644
index 00000000000..f8213ffbbac
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/kubernetes-cluster-setup.md
@@ -0,0 +1,11 @@
+---
+title: "Kubernetes 使用教程"
+---
+
+
+
+
+
+本章节介绍如何安装 Kubernetes 集群,使得 Rancher Server 可以安装在该集群上。
+
+Rancher 可以在任何 Kubernetes 集群上运行。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
new file mode 100644
index 00000000000..397fe2aa0d0
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md
@@ -0,0 +1,193 @@
+---
+title: 配置高可用的 RKE Kubernetes 集群
+---
+
+本文介绍如何安装 Kubernetes 集群。该集群应专用于运行 Rancher Server。
+
+:::note
+
+Rancher 可以运行在任何 Kubernetes 集群上,包括托管的 Kubernetes,例如 Amazon EKS。以下说明只是安装 Kubernetes 其中一种方式。
+
+:::
+
+如果系统无法直接访问互联网,请参见[离线环境:Kubernetes 安装](../../../pages-for-subheaders/air-gapped-helm-cli-install.md)。
+
+:::tip 单节点安装提示:
+
+在单节点 Kubernetes 集群中,Rancher Server 不具备高可用性,而高可用性对在生产环境中运行 Rancher 非常重要。但是,如果你想要短期内使用单节点节省资源,同时又保留高可用性迁移路径,把 Rancher 安装到单节点集群也是合适的。
+
+要设置单节点 RKE 集群,在 `cluster.yml` 中配置一个节点。该节点需具备所有三个角色,分别是 `etcd`、`controlplane` 和 `worker`。
+
+在这两种单节点设置中,Rancher 可以与 Helm 一起安装在 Kubernetes 集群上,安装方法与安装到其他集群上一样。
+
+:::
+
+## 安装 Kubernetes
+
+### 所需的 CLI 工具
+
+安装 Kubernetes 命令行工具 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl)。
+
+安装 [RKE](https://rancher.com/docs/rke/latest/en/installation/)(Rancher Kubernetes Engine,是一个 Kubernetes 发行版和命令行工具)。
+
+### 1. 创建集群配置文件
+
+在这部分,你将创建一个名为 `rancher-cluster.yml`的 Kubernetes 集群配置文件。在后续使用 RKE 命令设置集群的步骤中,此文件会用于在节点上安装 Kubernetes。
+
+使用下面的示例作为指南,创建 `rancher-cluster.yml` 文件。将 `nodes` 列表中的 IP 地址替换为你创建的 3 个节点的 IP 地址或 DNS 名称。
+
+如果你的节点有公共地址和内部地址,建议设置 `internal_address:` 以便 Kubernetes 使用它实现集群内部通信。如果你想使用自引用的安全组或防火墙,某些服务(如 AWS EC2)要求设置 `internal_address:`。
+
+RKE 需要通过 SSH 连接到每个节点,它会在 `~/.ssh/id_rsa`的默认位置查找私钥。如果某个节点的私钥不在默认位置中,你还需要为该节点配置 `ssh_key_path` 选项。
+
+在选择 Kubernetes 版本时,请务必先查阅[支持矩阵](https://rancher.com/support-matrix/),以找出已针对你的 Rancher 版本验证的最新 Kubernetes 版本。
+
+```yaml
+nodes:
+ - address: 165.227.114.63
+ internal_address: 172.16.22.12
+ user: ubuntu
+ role: [controlplane, worker, etcd]
+ - address: 165.227.116.167
+ internal_address: 172.16.32.37
+ user: ubuntu
+ role: [controlplane, worker, etcd]
+ - address: 165.227.127.226
+ internal_address: 172.16.42.73
+ user: ubuntu
+ role: [controlplane, worker, etcd]
+
+services:
+ etcd:
+ snapshot: true
+ creation: 6h
+ retention: 24h
+
+# Required for external TLS termination with
+# ingress-nginx v0.22+
+ingress:
+ provider: nginx
+ options:
+ use-forwarded-headers: "true"
+
+kubernetes_version: v1.25.6-rancher4-1
+```
+
+通用 RKE 节点选项
+
+| 选项 | 必填 | 描述 |
+| ------------------ | -------- | -------------------------------------------------------------------------------------- |
+| `address` | 是 | 公共 DNS 或 IP 地址 |
+| `user` | 是 | 可以运行 docker 命令的用户 |
+| `role` | 是 | 分配给节点的 Kubernetes 角色列表 |
+| `internal_address` | 否 | 内部集群流量的私有 DNS 或 IP 地址 |
+| `ssh_key_path` | 否 | 用来验证节点的 SSH 私钥文件路径(默认值为 `~/.ssh/id_rsa`) |
+
+:::note 高级配置:
+
+RKE 提供大量配置选项,用于针对你的环境进行自定义安装。
+
+如需了解选项和功能的完整列表,请参见 [RKE 官方文档](https://rancher.com/docs/rke/latest/en/config-options/)。
+
+要为大规模 Rancher 安装优化 etcd 集群,请参见 [etcd 设置指南](../../advanced-user-guides/tune-etcd-for-large-installs.md)。
+
+有关 Dockershim 支持的详情,请参见[此页面](../../../getting-started/installation-and-upgrade/installation-requirements/dockershim.md)。
+
+:::
+
+### 2. 运行 RKE
+
+```
+rke up --config ./rancher-cluster.yml
+```
+
+完成后,结束行应该是:`Finished building Kubernetes cluster successfully`。
+
+### 3. 测试集群
+
+本节介绍如何设置工作区,以便你可以使用 `kubectl` 命令行工具与此集群进行交互。
+
+如果你已安装 `kubectl`,你需要将 `kubeconfig` 文件放在 `kubectl` 可访问的位置。`kubeconfig` 文件包含使用 `kubectl` 访问集群所需的凭证。
+
+你在运行 `rke up` 时,RKE 应该已经创建了一个名为 `kube_config_cluster.yml`的 `kubeconfig` 文件。该文件具有 `kubectl` 和 `helm`的凭证。
+
+:::note
+
+如果你的文件名不是 `rancher-cluster.yml`,kubeconfig 文件将命名为 `kube_config_<FILE_NAME>.yml`。
+
+:::
+
+将此文件移动到 `$HOME/.kube/config`。如果你使用多个 Kubernetes 集群,将 `KUBECONFIG` 环境变量设置为 `kube_config_cluster.yml` 的路径:
+
+```
+export KUBECONFIG=$(pwd)/kube_config_cluster.yml
+```
+
+用 `kubectl` 测试你的连接性,并查看你的所有节点是否都处于 `Ready` 状态:
+
+```
+kubectl get nodes
+
+NAME STATUS ROLES AGE VERSION
+165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5
+165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5
+165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5
+```
+
+### 4. 检查集群 Pod 的健康状况
+
+检查所有需要的 Pod 和容器是否健康。
+
+- Pod 处于 `Running` 或 `Completed` 状态。
+- `READY` 表示运行 `STATUS` 为 `Running` 的 Pod 的所有容器(例如, `3/3`)。
+- `STATUS` 为 `Completed` 的 Pod 是一次运行的 Job。这些 Pod `READY` 列的值应该为 `0/1`。
+
+```
+kubectl get pods --all-namespaces
+
+NAMESPACE NAME READY STATUS RESTARTS AGE
+ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s
+ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s
+ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s
+kube-system canal-jp4hz 3/3 Running 0 30s
+kube-system canal-z2hg8 3/3 Running 0 30s
+kube-system canal-z6kpw 3/3 Running 0 30s
+kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s
+kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s
+kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s
+kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s
+kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s
+kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s
+kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s
+```
+
+这表示你已成功安装了可运行 Rancher Server 的 Kubernetes 集群。
+
+### 5. 保存你的文件
+
+:::note 重要提示:
+
+维护、排除问题和升级集群需要用到以下文件,请妥善保管这些文件:
+
+:::
+
+将以下文件的副本保存在安全位置:
+
+- `rancher-cluster.yml`:RKE 集群配置文件。
+- `kube_config_cluster.yml`:集群的 [Kubeconfig 文件](https://rancher.com/docs/rke/latest/en/kubeconfig/)。该文件包含可完全访问集群的凭证。
+- `rancher-cluster.rkestate`:[Kubernetes 状态文件](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state)。此文件包括用于完全访问集群的凭证。 _Kubernetes 集群状态文件仅在 RKE 版本是 0.2.0 或更高版本时生成。_
+
+:::note
+
+后两个文件名中的 `rancher-cluster` 部分取决于你命名 RKE 集群配置文件的方式。
+
+:::
+
+### 故障排除
+
+参见[故障排除](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)页面。
+
+
+### 后续操作
+[安装 Rancher](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)
+
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md
new file mode 100644
index 00000000000..cc61efd50e4
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md
@@ -0,0 +1,165 @@
+---
+title: 为 Rancher 设置高可用的 RKE2 Kubernetes 集群
+---
+
+本文介绍了如何根据 [Rancher Server 环境的最佳实践](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#kubernetes-安装环境)安装 Kubernetes 集群。
+
+## 先决条件
+
+以下说明假设你已参见[此章节](../infrastructure-setup/ha-rke2-kubernetes-cluster.md)配置好三个节点,一个负载均衡器和一个 DNS 记录。
+
+为了让 RKE2 与负载均衡器正常工作,你需要设置两个监听器,一个用于 9345 端口,另一个用于 6443 端口的 Kubernetes API。
+
+Rancher 需要安装在支持的 Kubernetes 版本上。如需了解你使用的 Rancher 版本支持哪些 Kubernetes 版本,请参见[支持维护条款](https://rancher.com/support-maintenance-terms/)。如需指定 RKE2 版本,请在运行 RKE2 安装脚本时,使用 `INSTALL_RKE2_VERSION` 环境变量。
+
+## 安装 Kubernetes
+
+### 1. 安装 Kubernetes 并设置 RKE2 Server
+
+RKE2 Server 使用嵌入式 etcd 运行。因此你不需要设置外部数据存储就可以在 HA 模式下运行。
+
+在第一个节点上,使用你的预共享密文作为 Token 来设置配置文件。Token 参数可以在启动时设置。
+
+如果你不指定预共享密文,RKE2 会生成一个预共享密文并将它放在 `/var/lib/rancher/rke2/server/node-token` 中。
+
+为了避免固定注册地址的证书错误,请在启动 Server 时设置 `tls-san` 参数。这个选项在 Server 的 TLS 证书中增加一个额外的主机名或 IP 作为 Subject Alternative Name。如果你想通过 IP 和主机名访问,你可以将它指定为一个列表。
+
+首先,创建用于存放 RKE2 配置文件的目录:
+
+```
+mkdir -p /etc/rancher/rke2/
+```
+
+然后,参见以下示例在 `/etc/rancher/rke2/config.yaml` 中创建 RKE2 配置文件:
+
+```
+token: my-shared-secret
+tls-san:
+ - my-kubernetes-domain.com
+ - another-kubernetes-domain.com
+```
+之后,运行安装命令并启用和启动 RKE2:
+
+```
+curl -sfL https://get.rke2.io | sh -
+systemctl enable rke2-server.service
+systemctl start rke2-server.service
+```
+1. 要加入其余的节点,使用同一个共享或自动生成的 Token 来配置每个额外的节点。以下是配置文件的示例:
+
+ token: my-shared-secret
+    server: https://<LOAD-BALANCER-DNS>:9345
+ tls-san:
+ - my-kubernetes-domain.com
+ - another-kubernetes-domain.com
+运行安装程序,然后启用并启动 RKE2:
+
+ curl -sfL https://get.rke2.io | sh -
+ systemctl enable rke2-server.service
+ systemctl start rke2-server.service
+
+
+1. 在第三个 RKE2 Server 节点上运行同样的命令。
+
+### 2. 检查 RKE2 是否正常运行
+
+在所有 Server 节点上启动了 RKE2 Server 进程后,确保集群已经正常启动,请运行以下命令:
+
+```
+/var/lib/rancher/rke2/bin/kubectl \
+    --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes
+```
+现在,Server 节点的状态应该是 Ready。
+
+测试集群 Pod 的健康状况:
+```
+/var/lib/rancher/rke2/bin/kubectl \
+ --kubeconfig /etc/rancher/rke2/rke2.yaml get pods --all-namespaces
+```
+
+**结果**:你已成功配置 RKE2 Kubernetes 集群。
+
+### 3. 保存并开始使用 kubeconfig 文件
+
+在每个 Rancher Server 节点安装 RKE2 时,会在每个节点的 `/etc/rancher/rke2/rke2.yaml` 中生成一个 `kubeconfig` 文件。该文件包含访问集群的凭证。请将该文件保存在安全的位置。
+
+如要使用该 `kubeconfig` 文件:
+
+1. 安装 Kubernetes 命令行工具 [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl)。
+2. 复制 `/etc/rancher/rke2/rke2.yaml` 文件并保存到本地主机的 `~/.kube/config` 目录上。
+3. 在 kubeconfig 文件中,`server` 的参数为 localhost。在端口 6443 上将服务器配置为 controlplane 负载均衡器的 DNS(RKE2 Kubernetes API Server 使用端口 6443,而 Rancher Server 将通过 NGINX Ingress 在端口 80 和 443 上提供服务。)以下是一个示例 `rke2.yaml`:
+
+```yml
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: [CERTIFICATE-DATA]
+ server: [LOAD-BALANCER-DNS]:6443 # 编辑此行
+ name: default
+contexts:
+- context:
+ cluster: default
+ user: default
+ name: default
+current-context: default
+kind: Config
+preferences: {}
+users:
+- name: default
+ user:
+ password: [PASSWORD]
+ username: admin
+```
+
+**结果**:你可以开始使用 `kubectl` 来管理你的 RKE2 集群。如果你有多个 `kubeconfig` 文件,在使用 `kubectl` 时,你可以传入文件路径来指定要使用的 `kubeconfig` 文件:
+
+```
+kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces
+```
+
+有关 `kubeconfig` 文件的详情,请参见 [RKE2 官方文档](https://docs.rke2.io/cluster_access)或 [Kubernetes 官方文档](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)中关于使用 `kubeconfig` 文件管理集群访问的部分。
+
+### 4. 检查集群 Pod 的健康状况
+
+现在你已经设置了 `kubeconfig` 文件。你可以使用 `kubectl` 在本地主机访问集群。
+
+检查所有需要的 Pod 和容器是否健康:
+
+```
+/var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get pods -A
+NAMESPACE NAME READY STATUS RESTARTS AGE
+kube-system cloud-controller-manager-rke2-server-1 1/1 Running 0 2m28s
+kube-system cloud-controller-manager-rke2-server-2 1/1 Running 0 61s
+kube-system cloud-controller-manager-rke2-server-3 1/1 Running 0 49s
+kube-system etcd-rke2-server-1 1/1 Running 0 2m13s
+kube-system etcd-rke2-server-2 1/1 Running 0 87s
+kube-system etcd-rke2-server-3 1/1 Running 0 56s
+kube-system helm-install-rke2-canal-hs6sx 0/1 Completed 0 2m17s
+kube-system helm-install-rke2-coredns-xmzm8 0/1 Completed 0 2m17s
+kube-system helm-install-rke2-ingress-nginx-flwnl 0/1 Completed 0 2m17s
+kube-system helm-install-rke2-metrics-server-7sggn 0/1 Completed 0 2m17s
+kube-system kube-apiserver-rke2-server-1 1/1 Running 0 116s
+kube-system kube-apiserver-rke2-server-2 1/1 Running 0 66s
+kube-system kube-apiserver-rke2-server-3 1/1 Running 0 48s
+kube-system kube-controller-manager-rke2-server-1 1/1 Running 0 2m30s
+kube-system kube-controller-manager-rke2-server-2 1/1 Running 0 57s
+kube-system kube-controller-manager-rke2-server-3 1/1 Running 0 42s
+kube-system kube-proxy-rke2-server-1 1/1 Running 0 2m25s
+kube-system kube-proxy-rke2-server-2 1/1 Running 0 59s
+kube-system kube-proxy-rke2-server-3 1/1 Running 0 85s
+kube-system kube-scheduler-rke2-server-1 1/1 Running 0 2m30s
+kube-system kube-scheduler-rke2-server-2 1/1 Running 0 57s
+kube-system kube-scheduler-rke2-server-3 1/1 Running 0 42s
+kube-system rke2-canal-b9lvm 2/2 Running 0 91s
+kube-system rke2-canal-khwp2 2/2 Running 0 2m5s
+kube-system rke2-canal-swfmq 2/2 Running 0 105s
+kube-system rke2-coredns-rke2-coredns-547d5499cb-6tvwb 1/1 Running 0 92s
+kube-system rke2-coredns-rke2-coredns-547d5499cb-rdttj 1/1 Running 0 2m8s
+kube-system rke2-coredns-rke2-coredns-autoscaler-65c9bb465d-85sq5 1/1 Running 0 2m8s
+kube-system rke2-ingress-nginx-controller-69qxc 1/1 Running 0 52s
+kube-system rke2-ingress-nginx-controller-7hprp 1/1 Running 0 52s
+kube-system rke2-ingress-nginx-controller-x658h 1/1 Running 0 52s
+kube-system rke2-metrics-server-6564db4569-vdfkn 1/1 Running 0 66s
+```
+
+**结果**:你可通过使用 `kubectl` 访问集群,并且 RKE2 集群能成功运行。现在,你可以在集群上安装 Rancher Management Server。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/checklist-for-production-ready-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
similarity index 68%
rename from i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/checklist-for-production-ready-clusters.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
index 5c925f124ed..6df361f420e 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/pages-for-subheaders/checklist-for-production-ready-clusters.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/checklist-for-production-ready-clusters.md
@@ -2,17 +2,21 @@
title: 生产就绪集群检查清单
---
+
+
+
+
本节将介绍创建生产就绪型 Kubernetes 集群的最佳实践。这个集群可用于运行你的应用和服务。
-有关集群的要求(包括对 OS/Docker、硬件和网络的要求),请参阅[节点要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md)。
+有关集群的要求(包括对 OS/Docker、硬件和网络的要求),请参阅[节点要求](../node-requirements-for-rancher-managed-clusters.md)部分。
本文介绍了我们推荐用于所有生产集群的最佳实践的简短列表。
-如需获取推荐的所有最佳实践的完整列表,请参阅[最佳实践](best-practices.md)。
+如需获取推荐的所有最佳实践的完整列表,请参阅[最佳实践](../../../../reference-guides/best-practices/best-practices.md)部分。
### 节点要求
-* 确保你的节点满足所有[节点要求](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md),包括端口要求。
+* 确保你的节点满足所有[节点要求](../node-requirements-for-rancher-managed-clusters.md),包括端口要求。
### 备份 etcd
@@ -29,9 +33,9 @@ title: 生产就绪集群检查清单
* 为两个或更多节点分配 `controlplane` 角色,能实现主组件的高可用性。
* 为两个或多个节点分配 `worker` 角色,以便在节点故障时重新安排工作负载。
-有关每个角色的用途的更多信息,请参阅 [Kubernetes 中的节点角色](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md)。
+有关每个角色的用途的更多信息,请参阅 [Kubernetes 中的节点角色](roles-for-nodes-in-kubernetes.md)。
-有关每个 Kubernetes 角色的节点数的详细信息,请参阅[推荐架构](../reference-guides/rancher-manager-architecture/architecture-recommendations.md)。
+有关每个 Kubernetes 角色的节点数的详细信息,请参阅[推荐架构](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md)部分。
### Logging 和 Monitoring
@@ -44,5 +48,5 @@ title: 生产就绪集群检查清单
### 网络
-* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://coreos.com/etcd/docs/latest/tuning.html) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
-* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 [Kubernetes Cloud Provider](set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
+* 最小化网络延迟。Rancher 建议尽量减少 etcd 节点之间的延迟。`heartbeat-interval` 的默认设置是 `500`,`election-timeout` 的默认设置是 `5000`。这些 [etcd 调优设置](https://etcd.io/docs/v3.5/tuning/) 允许 etcd 在大多数网络(网络延迟特别高的情况下除外)中运行。
+* 集群节点应位于单个区域内。大多数云厂商在一个区域内提供多个可用区,这可以提高你集群的可用性。任何角色的节点都可以使用多个可用区。如果你使用 [Kubernetes Cloud Provider](../set-up-cloud-providers/set-up-cloud-providers.md) 资源,请查阅文档以了解限制(即区域存储限制)。
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
new file mode 100644
index 00000000000..c6b1ef60709
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md
@@ -0,0 +1,73 @@
+---
+title: 推荐的集群架构
+---
+
+有三个角色可以分配给节点,分别是 `etcd`、`controlplane` 和 `worker`。
+
+## 将 Worker 节点与具有其他角色的节点分开
+
+在设计集群时,你有两种选择:
+
+* 为每个角色使用专用节点。这确保了特定角色所需组件的资源可用性。它还根据[端口要求](../node-requirements-for-rancher-managed-clusters.md#网络要求)严格隔离每个角色之间的网络流量。
+* 将 `etcd` 和 `controlplane` 角色分配给相同的节点。该节点必须满足这两个角色的硬件要求。
+
+无论在哪种情况下,都不应该在具有 `etcd` 或 `controlplane` 角色的节点中使用或添加 `worker` 角色。
+
+因此,每个节点的角色都有如下几种配置选择:
+
+* `etcd`
+* `controlplane`
+* `etcd` 和 `controlplane`
+* `worker`
+
+## 每个角色的推荐节点数
+
+集群应该有:
+
+- 至少拥有三个角色为 `etcd` 的节点,来确保失去一个节点时仍能存活。增加 etcd 节点数量能提高容错率,而将 etcd 分散到不同可用区甚至能获取更好的容错能力。
+- 至少两个节点具有 `controlplane` 角色,以实现主组件高可用性。
+- 至少两个具有 `worker` 角色的节点,用于在节点故障时重新安排工作负载。
+
+有关每个角色的用途的更多信息,请参阅 [Kubernetes 中的节点角色](roles-for-nodes-in-kubernetes.md)。
+
+
+### controlplane 节点数
+
+添加多个具有 `controlplane` 角色的节点,使每个主组件都具有高可用性。
+
+### etcd 节点数
+
+在保持集群可用性的同时,可以一次丢失的节点数由分配了 `etcd` 角色的节点数决定。对于具有 n 个成员的集群,最小值为 (n/2)+1。因此,我们建议在一个区域内的 3 个不同可用区中各创建一个 `etcd` 节点,以在一个可用区丢失的情况下存活。如果你只使用两个区域,那么在“多数节点”所在的可用区不可用时,你将会丢失 etcd 集群。
+
+| 具有 `etcd` 角色的节点 | 多数节点 | 容错能力 |
+|--------------|------------|-------------------|
+| 1 | 1 | 0 |
+| 2 | 2 | 0 |
+| 3 | 2 | **1** |
+| 4 | 3 | 1 |
+| 5 | 3 | **2** |
+| 6 | 4 | 2 |
+| 7 | 4 | **3** |
+| 8 | 5 | 3 |
+| 9 | 5 | **4** |
+
+参考:
+
+* [最佳 etcd 集群大小的官方 etcd 文档](https://etcd.io/docs/v3.5/faq/#what-is-failure-tolerance)
+* [为 Kubernetes 操作 etcd 集群的官方 Kubernetes 文档](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/)
+
+### Worker 节点数
+
+添加多个具有 `worker` 角色的节点能确保一个节点出现故障时可以重新安排工作负载。
+
+### 为什么 Rancher 集群和运行应用的集群的生产要求不同
+
+你可能已经注意到我们的 [Kubernetes 安装](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)说明并不符合我们对生产就绪集群的要求,这是因为 `worker` 角色没有专用节点。然而,你在 Rancher 中的这个三节点集群是有效的,因为:
+
+* 它允许一个 `etcd` 节点故障。
+* 它通过多个 `controlplane` 节点来维护 master 组件的多个实例。
+* 此集群上没有创建除 Rancher 之外的其他工作负载。
+
+## 参考
+
+* [Kubernetes:主组件](https://kubernetes.io/docs/concepts/overview/components/#master-components)
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
new file mode 100644
index 00000000000..1f929eb52f9
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md
@@ -0,0 +1,50 @@
+---
+title: Kubernetes 中节点的角色
+---
+
+本节介绍 Kubernetes 中 etcd 节点、controlplane 节点和 worker 节点的角色,以及这些角色如何在集群中协同工作。
+
+此图适用于 [Rancher 通过 RKE 部署的 Kubernetes 集群](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md):
+
+
+线条表示组件之间的通信。而颜色纯粹用于视觉辅助。
+
+## etcd
+
+具有 `etcd` 角色的节点运行 etcd,这是一个一致且高可用的键值存储,用作 Kubernetes 所有集群数据的后备存储。etcd 将数据复制到每个节点。
+
+:::note
+
+具有 `etcd` 角色的节点在 UI 中显示为`不可调度`,即默认情况下不会将 Pod 调度到这些节点。
+
+:::
+
+## controlplane
+
+具有 `controlplane` 角色的节点运行 Kubernetes 主组件(不包括 `etcd`,因为它是一个单独的角色)。有关组件的详细列表,请参阅 [Kubernetes:主组件](https://kubernetes.io/docs/concepts/overview/components/#master-components)。
+
+:::note
+
+具有 `controlplane` 角色的节点在 UI 中显示为`不可调度`,即默认情况下不会将 Pod 调度到这些节点。
+
+:::
+
+### kube-apiserver
+
+Kubernetes API Server (`kube-apiserver`) 能水平扩展。如果节点具有需要访问 Kubernetes API Server 的组件,则每个具有 `controlplane` 角色的节点都将被添加到节点上的 NGINX 代理中。这意味着如果一个节点变得不可访问,该节点上的本地 NGINX 代理会将请求转发到列表中的另一个 Kubernetes API Server。
+
+### kube-controller-manager
+
+Kubernetes Controller Manager 使用 Kubernetes 中的端点进行 Leader 选举。`kube-controller-manager` 的一个实例将在 Kubernetes 端点中创建一个条目,并在配置的时间间隔内更新该条目。其他实例将看到一个状态为 Active 的 Leader,并等待该条目过期(例如节点无响应)。
+
+### kube-scheduler
+
+Kubernetes 调度器使用 Kubernetes 中的端点进行 Leader 选举。`kube-scheduler` 的一个实例将在 Kubernetes 端点中创建一个条目,并在配置的时间间隔内更新该条目。其他实例将看到一个状态为 Active 的 Leader,并等待该条目过期(例如节点无响应)。
+
+## worker
+
+具有 `worker` 角色的节点运行 Kubernetes 节点组件。有关组件的详细列表,请参阅 [Kubernetes:节点组件](https://kubernetes.io/docs/concepts/overview/components/#node-components)。
+
+## 参考
+
+* [Kubernetes:节点组件](https://kubernetes.io/docs/concepts/overview/components/#node-components)
\ No newline at end of file
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
similarity index 67%
rename from i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md
rename to i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
index 702c3542c7e..8cd13712b03 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/version-2.8/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/kubernetes-clusters-in-rancher-setup.md
@@ -1,21 +1,23 @@
---
-title: 在 Rancher 中设置 Kubernetes 集群
+title: Rancher 中的 Kubernetes 集群设置
description: 配置 Kubernetes 集群
---
+
+
+
+
Rancher 允许你通过 Rancher UI 来创建集群,从而简化了集群的创建流程。Rancher 提供了多种启动集群的选项。你可以选择最适合你的用例的选项。
-本节默认你已对 Docker 和 Kubernetes 有一定的了解。如果你需要了解 Kubernetes 组件如何协作,请参见 [Kubernetes 概念](../reference-guides/kubernetes-concepts.md)。
-
-有关 Rancher Server 配置集群的方式,以及使用什么工具来创建集群的详细信息,请参阅[产品架构](rancher-manager-architecture.md)。
-
+本节默认你已对 Docker 和 Kubernetes 有一定的了解。如果你需要了解 Kubernetes 组件如何协作,请参见 [Kubernetes 概念](../../../reference-guides/kubernetes-concepts.md)页面。
+有关 Rancher Server 配置集群的方式,以及使用什么工具来创建集群的详细信息,请参阅[产品架构](../../../reference-guides/rancher-manager-architecture/rancher-manager-architecture.md)页面。
### 不同类型集群的管理功能
下表总结了每一种类型的集群和对应的可编辑的选项和设置:
-import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md';
+import ClusterCapabilitiesTable from '../../../shared-files/\_cluster-capabilities-table.md';
@@ -25,7 +27,7 @@ import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-tabl
如果你使用 Kubernetes 提供商,例如 Google GKE,Rancher 将与对应的云 API 集成,允许你从 Rancher UI 为托管集群创建和管理 RBAC。
-详情请参阅[托管 Kubernetes 集群](set-up-clusters-from-hosted-kubernetes-providers.md)。
+详情请参阅[托管 Kubernetes 集群](set-up-clusters-from-hosted-kubernetes-providers/set-up-clusters-from-hosted-kubernetes-providers.md)部分。
## 使用 Rancher 启动 Kubernetes
@@ -37,23 +39,23 @@ import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-tabl
如果你已经有一个想要添加到 RKE 集群的节点,你可以通过在节点上运行 Rancher Agent 容器将节点添加到集群中。
-有关详细信息,请参阅 [RKE 集群](../pages-for-subheaders/launch-kubernetes-with-rancher.md)。
+有关详细信息,请参阅 [RKE 集群](../launch-kubernetes-with-rancher/launch-kubernetes-with-rancher.md)部分。
### 在基础设施提供商中启动 Kubernetes 并配置节点
Rancher 可以在 Amazon EC2、DigitalOcean、Azure 或 vSphere 等基础设施提供商中动态配置节点,然后在节点上安装 Kubernetes。
-使用 Rancher,你可以基于[节点模板](use-new-nodes-in-an-infra-provider.md#节点模板)创建节点池。此模板定义了要在云提供商中启动的节点的参数。
+使用 Rancher,你可以基于[节点模板](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#节点模板)创建节点池。此模板定义了要在云提供商中启动的节点的参数。
使用由基础设施提供商托管的节点的一个好处是,如果一个节点与集群失去连接,Rancher 可以自动替换它,从而维护集群配置。
-Rancher UI 中状态为 Active 的[主机驱动](use-new-nodes-in-an-infra-provider.md#主机驱动)决定了可用于创建节点模板的云提供商。
+Rancher UI 中状态为 Active 的[主机驱动](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md#主机驱动)决定了可用于创建节点模板的云提供商。
-如需更多信息,请参阅[基础设施提供商托管的节点](use-new-nodes-in-an-infra-provider.md)。
+如需更多信息,请参阅[基础设施提供商托管的节点](../launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/use-new-nodes-in-an-infra-provider.md)部分。
### 在现有自定义节点上启动 Kubernetes
-在设置这种类型的集群时,Rancher 会在现有的[自定义节点](use-existing-nodes.md)上安装 Kubernetes,从而创建一个自定义集群。
+在设置这种类型的集群时,Rancher 会在现有的[自定义节点](../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/use-existing-nodes.md)上安装 Kubernetes,从而创建一个自定义集群。
你可以使用任何节点,在 Rancher 中创建一个集群。
@@ -67,7 +69,7 @@ Rancher UI 中状态为 Active 的[主机驱动](use-new-nodes-in-an-infra-provi
删除在 Rancher 中创建的 EKS 集群后,该集群将被销毁。删除在 Rancher 中注册的 EKS 集群时,它与 Rancher Server 会断开连接,但它仍然存在。你仍然可以像在 Rancher 中注册之前一样访问它。
-详情请参见[本页面](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md)。
+详情请参见[本页面](register-existing-clusters.md)。
## 以编程方式创建集群
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-amazon.md b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-amazon.md
new file mode 100644
index 00000000000..fb21c4e82ef
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/version-2.9/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/migrate-to-an-out-of-tree-cloud-provider/migrate-to-out-of-tree-amazon.md
@@ -0,0 +1,194 @@
+---
+title: 将 Amazon 从树内迁移到树外
+---
+
+
+
+
+
+Kubernetes 不再在树内维护云提供商。在 Kubernetes v1.27 及更高版本中,树内云提供商已被移除。当你从树内提供商迁移到树外提供商时,Rancher UI 允许你升级到 Kubernetes v1.27。
+
+不过,如果你执行的是手动迁移,现有集群必须在迁移后升级到 Kubernetes v1.27 才能继续运行。
+
+要从树内云提供商迁移到树外 AWS 云提供商,必须停止现有集群的 kube 控制器管理器,并安装 AWS 云控制器管理器。有许多方法可以做到这一点。有关详情,请参阅有关[外部云控制器管理器](https://cloud-provider-aws.sigs.k8s.io/getting_started/)的 AWS 官方文档。
+
+如果可以接受迁移过程中出现一些停机,请按照说明[设置外部云提供商](../set-up-cloud-providers/amazon.md#using-the-out-of-tree-aws-cloud-provider)。这些说明概述了如何为新配置的集群配置树外云提供商。在设置过程中,会有一些停机,因为从旧云提供商停止运行到新云提供商开始运行之间会有一段时间的间隔。
+
+如果你的设置不能容忍任何控制平面停机,则必须启用领导者迁移。这有助于从 kube 控制器管理器中的控制器顺利过渡到云控制器管理器中的对应控制器。有关详细信息,请参阅 AWS 官方文档[使用领导者迁移](https://cloud-provider-aws.sigs.k8s.io/getting_started/)。
+
+:::note 重要提示:
+Kubernetes [云控制器迁移文档](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin)指出,可以使用相同的 Kubernetes 版本进行迁移,但假设迁移是 Kubernetes 升级的一部分。请参考有关[迁移到要使用的云控制器管理器](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/)的 Kubernetes 文档,了解迁移前是否需要自定义设置。确认[迁移配置值](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration)。如果你的云提供商提供 Node IPAM 控制器的实现,你还需要迁移 [IPAM 控制器](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration)。
+:::
+
+
+